aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--CREDITS11
-rw-r--r--Documentation/ABI/testing/sysfs-bus-umc28
-rw-r--r--Documentation/ABI/testing/sysfs-bus-usb43
-rw-r--r--Documentation/ABI/testing/sysfs-class-usb_host25
-rw-r--r--Documentation/ABI/testing/sysfs-class-uwb_rc144
-rw-r--r--Documentation/ABI/testing/sysfs-wusb_cbaf100
-rw-r--r--Documentation/feature-removal-schedule.txt8
-rw-r--r--Documentation/i2c/busses/i2c-i8017
-rw-r--r--Documentation/i2c/porting-clients160
-rw-r--r--Documentation/i2c/writing-clients491
-rw-r--r--Documentation/ia64/xen.txt183
-rw-r--r--Documentation/kdump/kdump.txt14
-rw-r--r--Documentation/powerpc/booting-without-of.txt2
-rw-r--r--Documentation/powerpc/dts-bindings/fsl/board.txt4
-rw-r--r--Documentation/usb/WUSB-Design-overview.txt448
-rw-r--r--Documentation/usb/wusb-cbaf139
-rw-r--r--MAINTAINERS33
-rw-r--r--arch/alpha/oprofile/common.c2
-rw-r--r--arch/arm/Kconfig4
-rw-r--r--arch/arm/common/Kconfig2
-rw-r--r--arch/arm/common/sa1111.c4
-rw-r--r--arch/arm/configs/trizeps4_defconfig1
-rw-r--r--arch/arm/mach-clps711x/include/mach/memory.h2
-rw-r--r--arch/arm/mach-ixp4xx/Kconfig5
-rw-r--r--arch/arm/mach-kirkwood/common.c35
-rw-r--r--arch/arm/mach-kirkwood/common.h2
-rw-r--r--arch/arm/mach-kirkwood/rd88f6281-setup.c11
-rw-r--r--arch/arm/mach-mv78xx0/db78x00-bp-setup.c5
-rw-r--r--arch/arm/mach-orion5x/common.c38
-rw-r--r--arch/arm/mach-orion5x/common.h2
-rw-r--r--arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c11
-rw-r--r--arch/arm/mach-orion5x/rd88f5181l-ge-setup.c11
-rw-r--r--arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c11
-rw-r--r--arch/arm/mach-orion5x/wrt350n-v2-setup.c11
-rw-r--r--arch/arm/mach-pxa/Kconfig1
-rw-r--r--arch/arm/mach-pxa/include/mach/irqs.h5
-rw-r--r--arch/arm/mach-pxa/include/mach/spitz.h2
-rw-r--r--arch/arm/mach-pxa/pwm.c2
-rw-r--r--arch/arm/mach-pxa/trizeps4.c2
-rw-r--r--arch/arm/mach-s3c2443/clock.c3
-rw-r--r--arch/arm/mm/cache-v4.S6
-rw-r--r--arch/arm/plat-s3c24xx/pwm-clock.c2
-rw-r--r--arch/arm/plat-s3c24xx/pwm.c12
-rw-r--r--arch/cris/Kconfig2
-rw-r--r--arch/h8300/Kconfig2
-rw-r--r--arch/ia64/Kconfig50
-rw-r--r--arch/ia64/Makefile3
-rw-r--r--arch/ia64/configs/generic_defconfig2
-rw-r--r--arch/ia64/configs/tiger_defconfig2
-rw-r--r--arch/ia64/dig/Makefile5
-rw-r--r--arch/ia64/dig/dig_vtd_iommu.c59
-rw-r--r--arch/ia64/dig/machvec_vtd.c3
-rw-r--r--arch/ia64/ia32/ia32_entry.S11
-rw-r--r--arch/ia64/ia32/sys_ia32.c106
-rw-r--r--arch/ia64/include/asm/break.h9
-rw-r--r--arch/ia64/include/asm/cacheflush.h2
-rw-r--r--arch/ia64/include/asm/device.h3
-rw-r--r--arch/ia64/include/asm/dma-mapping.h50
-rw-r--r--arch/ia64/include/asm/iommu.h16
-rw-r--r--arch/ia64/include/asm/kregs.h2
-rw-r--r--arch/ia64/include/asm/machvec.h4
-rw-r--r--arch/ia64/include/asm/machvec_dig_vtd.h38
-rw-r--r--arch/ia64/include/asm/machvec_init.h1
-rw-r--r--arch/ia64/include/asm/machvec_xen.h22
-rw-r--r--arch/ia64/include/asm/meminit.h3
-rw-r--r--arch/ia64/include/asm/native/inst.h10
-rw-r--r--arch/ia64/include/asm/native/pvchk_inst.h263
-rw-r--r--arch/ia64/include/asm/paravirt.h4
-rw-r--r--arch/ia64/include/asm/pci.h3
-rw-r--r--arch/ia64/include/asm/ptrace.h8
-rw-r--r--arch/ia64/include/asm/pvclock-abi.h48
-rw-r--r--arch/ia64/include/asm/swiotlb.h56
-rw-r--r--arch/ia64/include/asm/sync_bitops.h51
-rw-r--r--arch/ia64/include/asm/syscall.h163
-rw-r--r--arch/ia64/include/asm/thread_info.h3
-rw-r--r--arch/ia64/include/asm/timex.h2
-rw-r--r--arch/ia64/include/asm/unistd.h1
-rw-r--r--arch/ia64/include/asm/xen/events.h50
-rw-r--r--arch/ia64/include/asm/xen/grant_table.h29
-rw-r--r--arch/ia64/include/asm/xen/hypercall.h265
-rw-r--r--arch/ia64/include/asm/xen/hypervisor.h89
-rw-r--r--arch/ia64/include/asm/xen/inst.h458
-rw-r--r--arch/ia64/include/asm/xen/interface.h346
-rw-r--r--arch/ia64/include/asm/xen/irq.h44
-rw-r--r--arch/ia64/include/asm/xen/minstate.h134
-rw-r--r--arch/ia64/include/asm/xen/page.h65
-rw-r--r--arch/ia64/include/asm/xen/privop.h129
-rw-r--r--arch/ia64/include/asm/xen/xcom_hcall.h51
-rw-r--r--arch/ia64/include/asm/xen/xencomm.h42
-rw-r--r--arch/ia64/kernel/Makefile22
-rw-r--r--arch/ia64/kernel/acpi.c22
-rw-r--r--arch/ia64/kernel/asm-offsets.c31
-rw-r--r--arch/ia64/kernel/entry.S5
-rw-r--r--arch/ia64/kernel/ivt.S6
-rw-r--r--arch/ia64/kernel/msi_ia64.c80
-rw-r--r--arch/ia64/kernel/nr-irqs.c1
-rw-r--r--arch/ia64/kernel/paravirt.c2
-rw-r--r--arch/ia64/kernel/paravirt_inst.h4
-rw-r--r--arch/ia64/kernel/pci-dma.c129
-rw-r--r--arch/ia64/kernel/pci-swiotlb.c46
-rw-r--r--arch/ia64/kernel/perfmon.c7
-rw-r--r--arch/ia64/kernel/process.c22
-rw-r--r--arch/ia64/kernel/ptrace.c112
-rw-r--r--arch/ia64/kernel/setup.c42
-rw-r--r--arch/ia64/kernel/signal.c8
-rw-r--r--arch/ia64/lib/flush.S55
-rw-r--r--arch/ia64/mm/tlb.c8
-rw-r--r--arch/ia64/oprofile/init.c4
-rw-r--r--arch/ia64/oprofile/perfmon.c4
-rw-r--r--arch/ia64/scripts/pvcheck.sed32
-rw-r--r--arch/ia64/xen/Kconfig26
-rw-r--r--arch/ia64/xen/Makefile22
-rw-r--r--arch/ia64/xen/grant-table.c155
-rw-r--r--arch/ia64/xen/hypercall.S91
-rw-r--r--arch/ia64/xen/hypervisor.c96
-rw-r--r--arch/ia64/xen/irq_xen.c435
-rw-r--r--arch/ia64/xen/irq_xen.h34
-rw-r--r--arch/ia64/xen/machvec.c4
-rw-r--r--arch/ia64/xen/suspend.c64
-rw-r--r--arch/ia64/xen/time.c213
-rw-r--r--arch/ia64/xen/time.h24
-rw-r--r--arch/ia64/xen/xcom_hcall.c441
-rw-r--r--arch/ia64/xen/xen_pv_ops.c364
-rw-r--r--arch/ia64/xen/xencomm.c105
-rw-r--r--arch/ia64/xen/xenivt.S52
-rw-r--r--arch/ia64/xen/xensetup.S83
-rw-r--r--arch/m32r/oprofile/init.c2
-rw-r--r--arch/mips/oprofile/common.c2
-rw-r--r--arch/mips/oprofile/op_impl.h2
-rw-r--r--arch/mips/oprofile/op_model_rm9000.c2
-rw-r--r--arch/parisc/oprofile/init.c2
-rw-r--r--arch/powerpc/Kconfig13
-rw-r--r--arch/powerpc/boot/Makefile7
-rw-r--r--arch/powerpc/boot/addnote.c41
-rw-r--r--arch/powerpc/boot/cuboot-52xx.c4
-rw-r--r--arch/powerpc/boot/cuboot-acadia.c174
-rw-r--r--arch/powerpc/boot/dts/acadia.dts224
-rw-r--r--arch/powerpc/boot/dts/hcu4.dts168
-rw-r--r--arch/powerpc/boot/dts/mpc8315erdb.dts8
-rw-r--r--arch/powerpc/boot/dts/mpc832x_mds.dts2
-rw-r--r--arch/powerpc/boot/dts/mpc8349emitx.dts16
-rw-r--r--arch/powerpc/boot/dts/mpc8349emitxgp.dts8
-rw-r--r--arch/powerpc/boot/dts/mpc834x_mds.dts2
-rw-r--r--arch/powerpc/boot/dts/mpc836x_mds.dts2
-rw-r--r--arch/powerpc/boot/dts/mpc8377_rdb.dts8
-rw-r--r--arch/powerpc/boot/dts/mpc8378_rdb.dts8
-rw-r--r--arch/powerpc/boot/dts/mpc8379_rdb.dts8
-rw-r--r--arch/powerpc/boot/dts/mpc8536ds.dts12
-rw-r--r--arch/powerpc/boot/dts/mpc8568mds.dts2
-rw-r--r--arch/powerpc/boot/dts/mpc8572ds.dts27
-rw-r--r--arch/powerpc/boot/libfdt-wrapper.c22
-rw-r--r--arch/powerpc/boot/main.c14
-rw-r--r--arch/powerpc/boot/ops.h6
-rw-r--r--arch/powerpc/boot/string.S4
-rwxr-xr-xarch/powerpc/boot/wrapper5
-rw-r--r--arch/powerpc/configs/40x/acadia_defconfig921
-rw-r--r--arch/powerpc/configs/40x/hcu4_defconfig929
-rw-r--r--arch/powerpc/include/asm/kdump.h17
-rw-r--r--arch/powerpc/include/asm/page.h1
-rw-r--r--arch/powerpc/kernel/cputable.c13
-rw-r--r--arch/powerpc/kernel/crash_dump.c2
-rw-r--r--arch/powerpc/kernel/head_64.S39
-rw-r--r--arch/powerpc/kernel/iommu.c69
-rw-r--r--arch/powerpc/kernel/machine_kexec.c2
-rw-r--r--arch/powerpc/kernel/machine_kexec_64.c26
-rw-r--r--arch/powerpc/kernel/misc_64.S9
-rw-r--r--arch/powerpc/kernel/pci-common.c3
-rw-r--r--arch/powerpc/kernel/prom_init.c61
-rw-r--r--arch/powerpc/kernel/prom_init_check.sh2
-rw-r--r--arch/powerpc/kernel/setup-common.c7
-rw-r--r--arch/powerpc/kernel/signal_64.c2
-rw-r--r--arch/powerpc/kernel/udbg_16550.c2
-rw-r--r--arch/powerpc/mm/hash_utils_64.c6
-rw-r--r--arch/powerpc/mm/numa.c21
-rw-r--r--arch/powerpc/oprofile/cell/pr_util.h13
-rw-r--r--arch/powerpc/oprofile/cell/spu_profiler.c4
-rw-r--r--arch/powerpc/oprofile/cell/spu_task_sync.c236
-rw-r--r--arch/powerpc/oprofile/op_model_cell.c2
-rw-r--r--arch/powerpc/platforms/40x/Kconfig38
-rw-r--r--arch/powerpc/platforms/40x/Makefile2
-rw-r--r--arch/powerpc/platforms/40x/hcu4.c61
-rw-r--r--arch/powerpc/platforms/40x/ppc40x_simple.c80
-rw-r--r--arch/powerpc/platforms/44x/Kconfig8
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_common.c7
-rw-r--r--arch/powerpc/platforms/85xx/ksi8560.c4
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_ads.c4
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_cds.c4
-rw-r--r--arch/powerpc/platforms/85xx/sbc8548.c4
-rw-r--r--arch/powerpc/platforms/85xx/sbc8560.c4
-rw-r--r--arch/powerpc/platforms/85xx/stx_gp3.c4
-rw-r--r--arch/powerpc/platforms/85xx/tqm85xx.c4
-rw-r--r--arch/powerpc/platforms/86xx/gef_sbc610.c2
-rw-r--r--arch/powerpc/platforms/86xx/mpc86xx_hpcn.c2
-rw-r--r--arch/powerpc/platforms/86xx/sbc8641d.c2
-rw-r--r--arch/powerpc/platforms/cell/ras.c6
-rw-r--r--arch/powerpc/platforms/cell/smp.c10
-rw-r--r--arch/powerpc/platforms/cell/spufs/file.c155
-rw-r--r--arch/powerpc/platforms/cell/spufs/run.c3
-rw-r--r--arch/powerpc/platforms/cell/spufs/sched.c24
-rw-r--r--arch/powerpc/platforms/cell/spufs/spufs.h1
-rw-r--r--arch/powerpc/platforms/cell/spufs/sputrace.c37
-rw-r--r--arch/powerpc/platforms/embedded6xx/c2k.c3
-rw-r--r--arch/powerpc/platforms/embedded6xx/prpmc2800.c3
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-memory.c6
-rw-r--r--arch/powerpc/platforms/pseries/iommu.c6
-rw-r--r--arch/powerpc/platforms/pseries/smp.c13
-rw-r--r--arch/powerpc/sysdev/Makefile1
-rw-r--r--arch/powerpc/sysdev/ppc4xx_gpio.c217
-rw-r--r--arch/sparc/oprofile/init.c2
-rw-r--r--arch/sparc64/oprofile/init.c2
-rw-r--r--arch/x86/kernel/amd_iommu_init.c2
-rw-r--r--arch/x86/kernel/irq.c4
-rw-r--r--arch/x86/kernel/pci-dma.c16
-rw-r--r--arch/x86/oprofile/backtrace.c3
-rw-r--r--arch/x86/oprofile/nmi_int.c173
-rw-r--r--arch/x86/oprofile/op_counter.h18
-rw-r--r--arch/x86/oprofile/op_model_amd.c59
-rw-r--r--arch/x86/oprofile/op_model_p4.c32
-rw-r--r--arch/x86/oprofile/op_model_ppro.c120
-rw-r--r--arch/x86/oprofile/op_x86_model.h13
-rw-r--r--arch/xtensa/Kconfig7
-rw-r--r--arch/xtensa/Makefile1
-rw-r--r--arch/xtensa/kernel/irq.c2
-rw-r--r--arch/xtensa/platforms/iss/network.c2
-rw-r--r--drivers/Kconfig2
-rw-r--r--drivers/Makefile1
-rw-r--r--drivers/acpi/pci_slot.c2
-rw-r--r--drivers/ata/libata-core.c2
-rw-r--r--drivers/ata/libata-eh.c26
-rw-r--r--drivers/ata/libata-sff.c11
-rw-r--r--drivers/ata/sata_via.c35
-rw-r--r--drivers/char/hvc_console.c86
-rw-r--r--drivers/char/hvc_console.h12
-rw-r--r--drivers/char/hvc_irq.c5
-rw-r--r--drivers/char/hvc_iseries.c1
-rw-r--r--drivers/char/hvc_vio.c1
-rw-r--r--drivers/char/hvc_xen.c1
-rw-r--r--drivers/char/tty_port.c2
-rw-r--r--drivers/char/virtio_console.c1
-rw-r--r--drivers/gpio/Kconfig7
-rw-r--r--drivers/gpio/Makefile1
-rw-r--r--drivers/gpio/twl4030-gpio.c521
-rw-r--r--drivers/gpu/drm/drm_drawable.c15
-rw-r--r--drivers/gpu/drm/drm_ioc32.c34
-rw-r--r--drivers/gpu/drm/drm_irq.c5
-rw-r--r--drivers/gpu/drm/drm_lock.c2
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c5
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h10
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_proc.c15
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c277
-rw-r--r--drivers/i2c/algos/i2c-algo-pcf.c21
-rw-r--r--drivers/i2c/busses/Kconfig1
-rw-r--r--drivers/i2c/busses/i2c-cpm.c1
-rw-r--r--drivers/i2c/busses/i2c-elektor.c3
-rw-r--r--drivers/i2c/busses/i2c-hydra.c2
-rw-r--r--drivers/i2c/busses/i2c-i801.c3
-rw-r--r--drivers/i2c/chips/Kconfig2
-rw-r--r--drivers/i2c/chips/Makefile3
-rw-r--r--drivers/i2c/i2c-core.c3
-rw-r--r--drivers/ide/Makefile84
-rw-r--r--drivers/ide/aec62xx.c (renamed from drivers/ide/pci/aec62xx.c)0
-rw-r--r--drivers/ide/ali14xx.c (renamed from drivers/ide/legacy/ali14xx.c)0
-rw-r--r--drivers/ide/alim15x3.c (renamed from drivers/ide/pci/alim15x3.c)0
-rw-r--r--drivers/ide/amd74xx.c (renamed from drivers/ide/pci/amd74xx.c)0
-rw-r--r--drivers/ide/arm/Makefile10
-rw-r--r--drivers/ide/atiixp.c (renamed from drivers/ide/pci/atiixp.c)0
-rw-r--r--drivers/ide/au1xxx-ide.c (renamed from drivers/ide/mips/au1xxx-ide.c)0
-rw-r--r--drivers/ide/buddha.c (renamed from drivers/ide/legacy/buddha.c)0
-rw-r--r--drivers/ide/cmd640.c (renamed from drivers/ide/pci/cmd640.c)0
-rw-r--r--drivers/ide/cmd64x.c (renamed from drivers/ide/pci/cmd64x.c)0
-rw-r--r--drivers/ide/cs5520.c (renamed from drivers/ide/pci/cs5520.c)0
-rw-r--r--drivers/ide/cs5530.c (renamed from drivers/ide/pci/cs5530.c)0
-rw-r--r--drivers/ide/cs5535.c (renamed from drivers/ide/pci/cs5535.c)0
-rw-r--r--drivers/ide/cy82c693.c (renamed from drivers/ide/pci/cy82c693.c)0
-rw-r--r--drivers/ide/delkin_cb.c (renamed from drivers/ide/pci/delkin_cb.c)0
-rw-r--r--drivers/ide/dtc2278.c (renamed from drivers/ide/legacy/dtc2278.c)0
-rw-r--r--drivers/ide/falconide.c (renamed from drivers/ide/legacy/falconide.c)0
-rw-r--r--drivers/ide/gayle.c (renamed from drivers/ide/legacy/gayle.c)0
-rw-r--r--drivers/ide/generic.c (renamed from drivers/ide/pci/generic.c)0
-rw-r--r--drivers/ide/h8300/Makefile2
-rw-r--r--drivers/ide/hpt366.c (renamed from drivers/ide/pci/hpt366.c)0
-rw-r--r--drivers/ide/ht6560b.c (renamed from drivers/ide/legacy/ht6560b.c)0
-rw-r--r--drivers/ide/icside.c (renamed from drivers/ide/arm/icside.c)0
-rw-r--r--drivers/ide/ide-4drives.c (renamed from drivers/ide/legacy/ide-4drives.c)0
-rw-r--r--drivers/ide/ide-cs.c (renamed from drivers/ide/legacy/ide-cs.c)0
-rw-r--r--drivers/ide/ide-h8300.c (renamed from drivers/ide/h8300/ide-h8300.c)0
-rw-r--r--drivers/ide/ide_arm.c (renamed from drivers/ide/arm/ide_arm.c)0
-rw-r--r--drivers/ide/ide_platform.c (renamed from drivers/ide/legacy/ide_platform.c)0
-rw-r--r--drivers/ide/it8213.c (renamed from drivers/ide/pci/it8213.c)0
-rw-r--r--drivers/ide/it821x.c (renamed from drivers/ide/pci/it821x.c)0
-rw-r--r--drivers/ide/jmicron.c (renamed from drivers/ide/pci/jmicron.c)0
-rw-r--r--drivers/ide/legacy/Makefile25
-rw-r--r--drivers/ide/macide.c (renamed from drivers/ide/legacy/macide.c)0
-rw-r--r--drivers/ide/mips/Makefile3
-rw-r--r--drivers/ide/ns87415.c (renamed from drivers/ide/pci/ns87415.c)0
-rw-r--r--drivers/ide/opti621.c (renamed from drivers/ide/pci/opti621.c)0
-rw-r--r--drivers/ide/palm_bk3710.c (renamed from drivers/ide/arm/palm_bk3710.c)0
-rw-r--r--drivers/ide/pci/Makefile43
-rw-r--r--drivers/ide/pdc202xx_new.c (renamed from drivers/ide/pci/pdc202xx_new.c)0
-rw-r--r--drivers/ide/pdc202xx_old.c (renamed from drivers/ide/pci/pdc202xx_old.c)0
-rw-r--r--drivers/ide/piix.c (renamed from drivers/ide/pci/piix.c)0
-rw-r--r--drivers/ide/pmac.c (renamed from drivers/ide/ppc/pmac.c)0
-rw-r--r--drivers/ide/ppc/Makefile2
-rw-r--r--drivers/ide/q40ide.c (renamed from drivers/ide/legacy/q40ide.c)0
-rw-r--r--drivers/ide/qd65xx.c (renamed from drivers/ide/legacy/qd65xx.c)0
-rw-r--r--drivers/ide/qd65xx.h (renamed from drivers/ide/legacy/qd65xx.h)0
-rw-r--r--drivers/ide/rapide.c (renamed from drivers/ide/arm/rapide.c)0
-rw-r--r--drivers/ide/rz1000.c (renamed from drivers/ide/pci/rz1000.c)0
-rw-r--r--drivers/ide/sc1200.c (renamed from drivers/ide/pci/sc1200.c)0
-rw-r--r--drivers/ide/scc_pata.c (renamed from drivers/ide/pci/scc_pata.c)0
-rw-r--r--drivers/ide/serverworks.c (renamed from drivers/ide/pci/serverworks.c)0
-rw-r--r--drivers/ide/sgiioc4.c (renamed from drivers/ide/pci/sgiioc4.c)0
-rw-r--r--drivers/ide/siimage.c (renamed from drivers/ide/pci/siimage.c)0
-rw-r--r--drivers/ide/sis5513.c (renamed from drivers/ide/pci/sis5513.c)0
-rw-r--r--drivers/ide/sl82c105.c (renamed from drivers/ide/pci/sl82c105.c)0
-rw-r--r--drivers/ide/slc90e66.c (renamed from drivers/ide/pci/slc90e66.c)0
-rw-r--r--drivers/ide/tc86c001.c (renamed from drivers/ide/pci/tc86c001.c)0
-rw-r--r--drivers/ide/triflex.c (renamed from drivers/ide/pci/triflex.c)0
-rw-r--r--drivers/ide/trm290.c (renamed from drivers/ide/pci/trm290.c)0
-rw-r--r--drivers/ide/umc8672.c (renamed from drivers/ide/legacy/umc8672.c)0
-rw-r--r--drivers/ide/via82cxxx.c (renamed from drivers/ide/pci/via82cxxx.c)0
-rw-r--r--drivers/infiniband/core/mad.c14
-rw-r--r--drivers/infiniband/core/ucma.c4
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c1
-rw-r--r--drivers/infiniband/hw/ehca/ehca_classes.h2
-rw-r--r--drivers/infiniband/hw/ehca/ehca_cq.c4
-rw-r--r--drivers/infiniband/hw/ehca/ehca_main.c83
-rw-r--r--drivers/infiniband/hw/ehca/ehca_qp.c10
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c6
-rw-r--r--drivers/infiniband/hw/mlx4/main.c11
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h1
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c21
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h1
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ethtool.c9
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c7
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c67
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_vlan.c4
-rw-r--r--drivers/md/Makefile2
-rw-r--r--drivers/md/dm-crypt.c56
-rw-r--r--drivers/md/dm-delay.c3
-rw-r--r--drivers/md/dm-exception-store.c108
-rw-r--r--drivers/md/dm-io.c2
-rw-r--r--drivers/md/dm-kcopyd.c14
-rw-r--r--drivers/md/dm-linear.c2
-rw-r--r--drivers/md/dm-log.c2
-rw-r--r--drivers/md/dm-mpath.c3
-rw-r--r--drivers/md/dm-path-selector.c3
-rw-r--r--drivers/md/dm-raid1.c791
-rw-r--r--drivers/md/dm-region-hash.c704
-rw-r--r--drivers/md/dm-round-robin.c3
-rw-r--r--drivers/md/dm-snap.c11
-rw-r--r--drivers/md/dm-snap.h5
-rw-r--r--drivers/md/dm-stripe.c6
-rw-r--r--drivers/md/dm-zero.c2
-rw-r--r--drivers/md/dm.c49
-rw-r--r--drivers/md/dm.h9
-rw-r--r--drivers/media/common/saa7146_fops.c4
-rw-r--r--drivers/media/common/saa7146_video.c12
-rw-r--r--drivers/media/dvb/frontends/s5h1411.c84
-rw-r--r--drivers/media/dvb/frontends/s5h1411.h2
-rw-r--r--drivers/media/radio/dsbr100.c62
-rw-r--r--drivers/media/radio/radio-mr800.c5
-rw-r--r--drivers/media/video/arv.c2
-rw-r--r--drivers/media/video/bt8xx/bttv-driver.c6
-rw-r--r--drivers/media/video/c-qcam.c2
-rw-r--r--drivers/media/video/cafe_ccic.c4
-rw-r--r--drivers/media/video/cpia.c6
-rw-r--r--drivers/media/video/cpia2/cpia2_v4l.c2
-rw-r--r--drivers/media/video/cx18/cx18-driver.c11
-rw-r--r--drivers/media/video/cx18/cx18-io.h4
-rw-r--r--drivers/media/video/cx18/cx18-streams.c36
-rw-r--r--drivers/media/video/cx23885/cx23885-417.c2
-rw-r--r--drivers/media/video/cx23885/cx23885-video.c2
-rw-r--r--drivers/media/video/cx88/cx88-blackbird.c2
-rw-r--r--drivers/media/video/cx88/cx88-cards.c4
-rw-r--r--drivers/media/video/cx88/cx88-dvb.c11
-rw-r--r--drivers/media/video/cx88/cx88-i2c.c2
-rw-r--r--drivers/media/video/cx88/cx88-mpeg.c15
-rw-r--r--drivers/media/video/cx88/cx88-video.c6
-rw-r--r--drivers/media/video/em28xx/em28xx-video.c2
-rw-r--r--drivers/media/video/et61x251/et61x251_core.c24
-rw-r--r--drivers/media/video/ivtv/ivtv-driver.c12
-rw-r--r--drivers/media/video/ivtv/ivtv-i2c.c1
-rw-r--r--drivers/media/video/ivtv/ivtv-ioctl.c13
-rw-r--r--drivers/media/video/ivtv/ivtv-ioctl.h3
-rw-r--r--drivers/media/video/ivtv/ivtv-streams.c4
-rw-r--r--drivers/media/video/ivtv/ivtvfb.c6
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-encoder.c4
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-hdw.c6
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-v4l2.c17
-rw-r--r--drivers/media/video/pwc/pwc-if.c2
-rw-r--r--drivers/media/video/saa7134/saa7134-core.c6
-rw-r--r--drivers/media/video/saa7134/saa7134-empress.c2
-rw-r--r--drivers/media/video/se401.c2
-rw-r--r--drivers/media/video/sn9c102/sn9c102_core.c24
-rw-r--r--drivers/media/video/stk-webcam.c4
-rw-r--r--drivers/media/video/stv680.c3
-rw-r--r--drivers/media/video/usbvideo/usbvideo.c2
-rw-r--r--drivers/media/video/usbvideo/vicam.c3
-rw-r--r--drivers/media/video/usbvision/usbvision-i2c.c2
-rw-r--r--drivers/media/video/usbvision/usbvision-video.c12
-rw-r--r--drivers/media/video/uvc/uvc_v4l2.c12
-rw-r--r--drivers/media/video/v4l1-compat.c221
-rw-r--r--drivers/media/video/v4l2-int-device.c5
-rw-r--r--drivers/media/video/v4l2-ioctl.c19
-rw-r--r--drivers/media/video/videobuf-dvb.c52
-rw-r--r--drivers/media/video/vivi.c6
-rw-r--r--drivers/media/video/w9968cf.c16
-rw-r--r--drivers/media/video/zc0301/zc0301_core.c24
-rw-r--r--drivers/media/video/zr364xx.c2
-rw-r--r--drivers/mfd/Makefile2
-rw-r--r--drivers/mfd/sm501.c25
-rw-r--r--drivers/mfd/twl4030-core.c421
-rw-r--r--drivers/mfd/twl4030-irq.c743
-rw-r--r--drivers/mfd/wm8350-core.c5
-rw-r--r--drivers/net/Kconfig15
-rw-r--r--drivers/net/Makefile2
-rw-r--r--drivers/net/fec_mpc52xx_phy.c55
-rw-r--r--drivers/net/ibm_newemac/core.c10
-rw-r--r--drivers/net/ibm_newemac/mal.c15
-rw-r--r--drivers/net/mlx4/Makefile7
-rw-r--r--drivers/net/mlx4/alloc.c97
-rw-r--r--drivers/net/mlx4/cq.c2
-rw-r--r--drivers/net/mlx4/en_cq.c146
-rw-r--r--drivers/net/mlx4/en_main.c254
-rw-r--r--drivers/net/mlx4/en_netdev.c1088
-rw-r--r--drivers/net/mlx4/en_params.c480
-rw-r--r--drivers/net/mlx4/en_port.c261
-rw-r--r--drivers/net/mlx4/en_port.h570
-rw-r--r--drivers/net/mlx4/en_resources.c96
-rw-r--r--drivers/net/mlx4/en_rx.c1080
-rw-r--r--drivers/net/mlx4/en_tx.c820
-rw-r--r--drivers/net/mlx4/eq.c2
-rw-r--r--drivers/net/mlx4/fw.c20
-rw-r--r--drivers/net/mlx4/fw.h7
-rw-r--r--drivers/net/mlx4/main.c287
-rw-r--r--drivers/net/mlx4/mcg.c4
-rw-r--r--drivers/net/mlx4/mlx4.h45
-rw-r--r--drivers/net/mlx4/mlx4_en.h561
-rw-r--r--drivers/net/mlx4/mr.c2
-rw-r--r--drivers/net/mlx4/pd.c4
-rw-r--r--drivers/net/mlx4/port.c282
-rw-r--r--drivers/net/mlx4/qp.c81
-rw-r--r--drivers/net/mlx4/srq.c2
-rw-r--r--drivers/net/xtsonic.c319
-rw-r--r--drivers/of/of_i2c.c2
-rw-r--r--drivers/of/of_spi.c2
-rw-r--r--drivers/oprofile/buffer_sync.c41
-rw-r--r--drivers/oprofile/buffer_sync.h4
-rw-r--r--drivers/oprofile/cpu_buffer.c106
-rw-r--r--drivers/oprofile/cpu_buffer.h12
-rw-r--r--drivers/oprofile/event_buffer.c34
-rw-r--r--drivers/oprofile/event_buffer.h17
-rw-r--r--drivers/oprofile/oprof.c26
-rw-r--r--drivers/oprofile/oprof.h12
-rw-r--r--drivers/oprofile/oprofile_files.c36
-rw-r--r--drivers/oprofile/oprofile_stats.c24
-rw-r--r--drivers/oprofile/oprofile_stats.h10
-rw-r--r--drivers/oprofile/oprofilefs.c78
-rw-r--r--drivers/oprofile/timer_int.c4
-rw-r--r--drivers/parport/parport_pc.c20
-rw-r--r--drivers/pci/dmar.c119
-rw-r--r--drivers/pci/hotplug/acpiphp.h9
-rw-r--r--drivers/pci/hotplug/acpiphp_core.c32
-rw-r--r--drivers/pci/hotplug/cpci_hotplug.h6
-rw-r--r--drivers/pci/hotplug/cpci_hotplug_core.c75
-rw-r--r--drivers/pci/hotplug/cpci_hotplug_pci.c4
-rw-r--r--drivers/pci/hotplug/cpqphp.h13
-rw-r--r--drivers/pci/hotplug/cpqphp_core.c43
-rw-r--r--drivers/pci/hotplug/cpqphp_ctrl.c2
-rw-r--r--drivers/pci/hotplug/fakephp.c26
-rw-r--r--drivers/pci/hotplug/ibmphp.h5
-rw-r--r--drivers/pci/hotplug/ibmphp_ebda.c19
-rw-r--r--drivers/pci/hotplug/pci_hotplug_core.c64
-rw-r--r--drivers/pci/hotplug/pciehp.h10
-rw-r--r--drivers/pci/hotplug/pciehp_core.c49
-rw-r--r--drivers/pci/hotplug/pciehp_ctrl.c56
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c60
-rw-r--r--drivers/pci/hotplug/rpaphp_slot.c10
-rw-r--r--drivers/pci/hotplug/sgi_hotplug.c18
-rw-r--r--drivers/pci/hotplug/shpchp.h9
-rw-r--r--drivers/pci/hotplug/shpchp_core.c52
-rw-r--r--drivers/pci/hotplug/shpchp_ctrl.c48
-rw-r--r--drivers/pci/intel-iommu.c250
-rw-r--r--drivers/pci/msi.c21
-rw-r--r--drivers/pci/pci-acpi.c85
-rw-r--r--drivers/pci/pci.c101
-rw-r--r--drivers/pci/pci.h2
-rw-r--r--drivers/pci/probe.c12
-rw-r--r--drivers/pci/quirks.c14
-rw-r--r--drivers/pci/search.c9
-rw-r--r--drivers/pci/slot.c160
-rw-r--r--drivers/pcmcia/Makefile2
-rw-r--r--drivers/rtc/Kconfig10
-rw-r--r--drivers/rtc/Makefile1
-rw-r--r--drivers/rtc/rtc-twl4030.c564
-rw-r--r--drivers/serial/8250_pci.c231
-rw-r--r--drivers/serial/Kconfig2
-rw-r--r--drivers/staging/Kconfig4
-rw-r--r--drivers/staging/Makefile2
-rw-r--r--drivers/staging/at76_usb/at76_usb.c4
-rw-r--r--drivers/staging/echo/bit_operations.h205
-rw-r--r--drivers/staging/echo/echo.c835
-rw-r--r--drivers/staging/echo/echo.h58
-rw-r--r--drivers/staging/echo/fir.h376
-rw-r--r--drivers/staging/echo/mmx.h29
-rw-r--r--drivers/staging/echo/oslec.h86
-rw-r--r--drivers/staging/et131x/et1310_phy.c2
-rw-r--r--drivers/staging/et131x/et131x_debug.c1
-rw-r--r--drivers/staging/et131x/et131x_initpci.c1
-rw-r--r--drivers/staging/go7007/go7007-driver.c1
-rw-r--r--drivers/staging/go7007/go7007-fw.c1
-rw-r--r--drivers/staging/go7007/go7007-i2c.c1
-rw-r--r--drivers/staging/go7007/go7007-usb.c1
-rw-r--r--drivers/staging/go7007/snd-go7007.c1
-rw-r--r--drivers/staging/go7007/wis-ov7640.c1
-rw-r--r--drivers/staging/go7007/wis-saa7113.c1
-rw-r--r--drivers/staging/go7007/wis-saa7115.c1
-rw-r--r--drivers/staging/go7007/wis-sony-tuner.c1
-rw-r--r--drivers/staging/go7007/wis-tw2804.c1
-rw-r--r--drivers/staging/go7007/wis-tw9903.c1
-rw-r--r--drivers/staging/go7007/wis-uda1342.c1
-rw-r--r--drivers/staging/me4000/me4000.c908
-rw-r--r--drivers/staging/me4000/me4000.h194
-rw-r--r--drivers/staging/pcc-acpi/Kconfig11
-rw-r--r--drivers/staging/pcc-acpi/Makefile1
-rw-r--r--drivers/staging/pcc-acpi/TODO7
-rw-r--r--drivers/staging/pcc-acpi/pcc-acpi.c1111
-rw-r--r--drivers/staging/poch/Kconfig6
-rw-r--r--drivers/staging/poch/Makefile1
-rw-r--r--drivers/staging/poch/README7
-rw-r--r--drivers/staging/poch/poch.c1425
-rw-r--r--drivers/staging/poch/poch.h29
-rw-r--r--drivers/staging/slicoss/slicoss.c18
-rw-r--r--drivers/staging/sxg/README1
-rw-r--r--drivers/staging/sxg/sxg.c1379
-rw-r--r--drivers/staging/sxg/sxg_os.h41
-rw-r--r--drivers/staging/sxg/sxgdbg.h2
-rw-r--r--drivers/staging/sxg/sxghif.h410
-rw-r--r--drivers/staging/sxg/sxghw.h404
-rw-r--r--drivers/staging/sxg/sxgphycode.h12
-rw-r--r--drivers/staging/usbip/usbip_common.c2
-rw-r--r--drivers/staging/usbip/vhci_rx.c2
-rw-r--r--drivers/staging/winbond/Kconfig2
-rw-r--r--drivers/staging/winbond/README1
-rw-r--r--drivers/staging/winbond/bss_f.h6
-rw-r--r--drivers/staging/winbond/ds_tkip.h6
-rw-r--r--drivers/staging/winbond/linux/common.h17
-rw-r--r--drivers/staging/winbond/linux/wb35reg.c63
-rw-r--r--drivers/staging/winbond/linux/wb35reg_f.h12
-rw-r--r--drivers/staging/winbond/linux/wb35reg_s.h4
-rw-r--r--drivers/staging/winbond/linux/wb35rx.c175
-rw-r--r--drivers/staging/winbond/linux/wb35rx_s.h2
-rw-r--r--drivers/staging/winbond/linux/wb35tx.c138
-rw-r--r--drivers/staging/winbond/linux/wb35tx_f.h2
-rw-r--r--drivers/staging/winbond/linux/wbusb.c259
-rw-r--r--drivers/staging/winbond/mds.c30
-rw-r--r--drivers/staging/winbond/mds_f.h6
-rw-r--r--drivers/staging/winbond/mds_s.h8
-rw-r--r--drivers/staging/winbond/mlme_s.h4
-rw-r--r--drivers/staging/winbond/mlmetxrx.c4
-rw-r--r--drivers/staging/winbond/mlmetxrx_f.h4
-rw-r--r--drivers/staging/winbond/reg.c24
-rw-r--r--drivers/staging/winbond/sme_api.c1
-rw-r--r--drivers/staging/winbond/sme_api.h2
-rw-r--r--drivers/staging/winbond/wbhal.c32
-rw-r--r--drivers/staging/winbond/wbhal_f.h28
-rw-r--r--drivers/staging/winbond/wbhal_s.h4
-rw-r--r--drivers/staging/winbond/wblinux.c208
-rw-r--r--drivers/staging/winbond/wblinux_s.h4
-rw-r--r--drivers/staging/wlan-ng/Kconfig2
-rw-r--r--drivers/staging/wlan-ng/hfa384x.h2
-rw-r--r--drivers/staging/wlan-ng/p80211wep.c1
-rw-r--r--drivers/staging/wlan-ng/prism2mib.c2
-rw-r--r--drivers/staging/wlan-ng/wlan_compat.h8
-rw-r--r--drivers/usb/Kconfig5
-rw-r--r--drivers/usb/Makefile3
-rw-r--r--drivers/usb/atm/speedtch.c12
-rw-r--r--drivers/usb/class/cdc-acm.c3
-rw-r--r--drivers/usb/class/cdc-wdm.c2
-rw-r--r--drivers/usb/core/driver.c3
-rw-r--r--drivers/usb/core/hub.c2
-rw-r--r--drivers/usb/gadget/config.c2
-rw-r--r--drivers/usb/gadget/pxa27x_udc.c2
-rw-r--r--drivers/usb/gadget/s3c2410_udc.c7
-rw-r--r--drivers/usb/host/Kconfig29
-rw-r--r--drivers/usb/host/Makefile3
-rw-r--r--drivers/usb/host/hwa-hc.c925
-rw-r--r--drivers/usb/host/ohci-hcd.c21
-rw-r--r--drivers/usb/host/ohci-tmio.c376
-rw-r--r--drivers/usb/host/whci/Kbuild11
-rw-r--r--drivers/usb/host/whci/asl.c367
-rw-r--r--drivers/usb/host/whci/hcd.c339
-rw-r--r--drivers/usb/host/whci/hw.c87
-rw-r--r--drivers/usb/host/whci/init.c188
-rw-r--r--drivers/usb/host/whci/int.c95
-rw-r--r--drivers/usb/host/whci/pzl.c398
-rw-r--r--drivers/usb/host/whci/qset.c567
-rw-r--r--drivers/usb/host/whci/whcd.h197
-rw-r--r--drivers/usb/host/whci/whci-hc.h416
-rw-r--r--drivers/usb/host/whci/wusb.c241
-rw-r--r--drivers/usb/misc/usbtest.c3
-rw-r--r--drivers/usb/serial/option.c96
-rw-r--r--drivers/usb/storage/initializers.c3
-rw-r--r--drivers/usb/storage/unusual_devs.h291
-rw-r--r--drivers/usb/wusbcore/Kconfig41
-rw-r--r--drivers/usb/wusbcore/Makefile26
-rw-r--r--drivers/usb/wusbcore/cbaf.c673
-rw-r--r--drivers/usb/wusbcore/crypto.c538
-rw-r--r--drivers/usb/wusbcore/dev-sysfs.c143
-rw-r--r--drivers/usb/wusbcore/devconnect.c1297
-rw-r--r--drivers/usb/wusbcore/mmc.c321
-rw-r--r--drivers/usb/wusbcore/pal.c42
-rw-r--r--drivers/usb/wusbcore/reservation.c115
-rw-r--r--drivers/usb/wusbcore/rh.c477
-rw-r--r--drivers/usb/wusbcore/security.c642
-rw-r--r--drivers/usb/wusbcore/wa-hc.c95
-rw-r--r--drivers/usb/wusbcore/wa-hc.h417
-rw-r--r--drivers/usb/wusbcore/wa-nep.c310
-rw-r--r--drivers/usb/wusbcore/wa-rpipe.c562
-rw-r--r--drivers/usb/wusbcore/wa-xfer.c1709
-rw-r--r--drivers/usb/wusbcore/wusbhc.c418
-rw-r--r--drivers/usb/wusbcore/wusbhc.h495
-rw-r--r--drivers/uwb/Kconfig90
-rw-r--r--drivers/uwb/Makefile29
-rw-r--r--drivers/uwb/address.c374
-rw-r--r--drivers/uwb/beacon.c642
-rw-r--r--drivers/uwb/driver.c144
-rw-r--r--drivers/uwb/drp-avail.c288
-rw-r--r--drivers/uwb/drp-ie.c232
-rw-r--r--drivers/uwb/drp.c461
-rw-r--r--drivers/uwb/est.c477
-rw-r--r--drivers/uwb/hwa-rc.c926
-rw-r--r--drivers/uwb/i1480/Makefile2
-rw-r--r--drivers/uwb/i1480/dfu/Makefile9
-rw-r--r--drivers/uwb/i1480/dfu/dfu.c217
-rw-r--r--drivers/uwb/i1480/dfu/i1480-dfu.h260
-rw-r--r--drivers/uwb/i1480/dfu/mac.c527
-rw-r--r--drivers/uwb/i1480/dfu/phy.c203
-rw-r--r--drivers/uwb/i1480/dfu/usb.c500
-rw-r--r--drivers/uwb/i1480/i1480-est.c99
-rw-r--r--drivers/uwb/i1480/i1480-wlp.h200
-rw-r--r--drivers/uwb/i1480/i1480u-wlp/Makefile8
-rw-r--r--drivers/uwb/i1480/i1480u-wlp/i1480u-wlp.h284
-rw-r--r--drivers/uwb/i1480/i1480u-wlp/lc.c421
-rw-r--r--drivers/uwb/i1480/i1480u-wlp/netdev.c368
-rw-r--r--drivers/uwb/i1480/i1480u-wlp/rx.c486
-rw-r--r--drivers/uwb/i1480/i1480u-wlp/sysfs.c408
-rw-r--r--drivers/uwb/i1480/i1480u-wlp/tx.c632
-rw-r--r--drivers/uwb/ie.c541
-rw-r--r--drivers/uwb/lc-dev.c492
-rw-r--r--drivers/uwb/lc-rc.c495
-rw-r--r--drivers/uwb/neh.c616
-rw-r--r--drivers/uwb/pal.c91
-rw-r--r--drivers/uwb/reset.c362
-rw-r--r--drivers/uwb/rsv.c680
-rw-r--r--drivers/uwb/scan.c133
-rw-r--r--drivers/uwb/umc-bus.c218
-rw-r--r--drivers/uwb/umc-dev.c104
-rw-r--r--drivers/uwb/umc-drv.c31
-rw-r--r--drivers/uwb/uwb-debug.c367
-rw-r--r--drivers/uwb/uwb-internal.h305
-rw-r--r--drivers/uwb/uwbd.c410
-rw-r--r--drivers/uwb/whc-rc.c520
-rw-r--r--drivers/uwb/whci.c269
-rw-r--r--drivers/uwb/wlp/Makefile10
-rw-r--r--drivers/uwb/wlp/driver.c43
-rw-r--r--drivers/uwb/wlp/eda.c449
-rw-r--r--drivers/uwb/wlp/messages.c1946
-rw-r--r--drivers/uwb/wlp/sysfs.c709
-rw-r--r--drivers/uwb/wlp/txrx.c374
-rw-r--r--drivers/uwb/wlp/wlp-internal.h228
-rw-r--r--drivers/uwb/wlp/wlp-lc.c585
-rw-r--r--drivers/uwb/wlp/wss-lc.c1055
-rw-r--r--drivers/watchdog/w83697ug_wdt.c2
-rw-r--r--fs/9p/vfs_file.c2
-rw-r--r--fs/ext3/ioctl.c12
-rw-r--r--fs/ext3/super.c23
-rw-r--r--fs/jbd/checkpoint.c68
-rw-r--r--fs/jbd/journal.c28
-rw-r--r--fs/jbd/recovery.c7
-rw-r--r--fs/nfsd/export.c19
-rw-r--r--fs/nfsd/nfssvc.c4
-rw-r--r--fs/nfsd/vfs.c6
-rw-r--r--include/asm-generic/mutex-dec.h26
-rw-r--r--include/asm-generic/mutex-xchg.h9
-rw-r--r--include/asm-x86/iommu.h4
-rw-r--r--include/asm-xtensa/io.h8
-rw-r--r--include/asm-xtensa/rwsem.h4
-rw-r--r--include/asm-xtensa/variant-dc232b/core.h424
-rw-r--r--include/asm-xtensa/variant-dc232b/tie-asm.h122
-rw-r--r--include/asm-xtensa/variant-dc232b/tie.h131
-rw-r--r--include/linux/bitmap.h1
-rw-r--r--include/linux/device-mapper.h3
-rw-r--r--include/linux/device.h2
-rw-r--r--include/linux/dm-region-hash.h104
-rw-r--r--include/linux/dma_remapping.h27
-rw-r--r--include/linux/i2c-algo-pcf.h5
-rw-r--r--include/linux/i2c.h163
-rw-r--r--include/linux/i2c/twl4030.h28
-rw-r--r--include/linux/intel-iommu.h66
-rw-r--r--include/linux/irqnr.h6
-rw-r--r--include/linux/jbd.h2
-rw-r--r--include/linux/libata.h3
-rw-r--r--include/linux/mlx4/cmd.h9
-rw-r--r--include/linux/mlx4/device.h54
-rw-r--r--include/linux/module.h6
-rw-r--r--include/linux/moduleparam.h25
-rw-r--r--include/linux/oprofile.h16
-rw-r--r--include/linux/page_cgroup.h5
-rw-r--r--include/linux/pci.h13
-rw-r--r--include/linux/pci_hotplug.h11
-rw-r--r--include/linux/pci_ids.h8
-rw-r--r--include/linux/pci_regs.h2
-rw-r--r--include/linux/profile.h7
-rw-r--r--include/linux/sched.h2
-rw-r--r--include/linux/usb/wusb-wa.h271
-rw-r--r--include/linux/usb/wusb.h376
-rw-r--r--include/linux/uwb.h765
-rw-r--r--include/linux/uwb/debug-cmd.h57
-rw-r--r--include/linux/uwb/debug.h82
-rw-r--r--include/linux/uwb/spec.h727
-rw-r--r--include/linux/uwb/umc.h194
-rw-r--r--include/linux/uwb/whci.h117
-rw-r--r--include/linux/videodev2.h7
-rw-r--r--include/linux/wlp.h735
-rw-r--r--include/linux/workqueue.h18
-rw-r--r--include/media/v4l2-int-device.h28
-rw-r--r--include/media/v4l2-ioctl.h24
-rw-r--r--include/media/videobuf-dvb.h1
-rw-r--r--include/net/9p/9p.h2
-rw-r--r--include/net/9p/client.h2
-rw-r--r--init/Kconfig3
-rw-r--r--init/main.c14
-rw-r--r--kernel/irq/chip.c3
-rw-r--r--kernel/module.c284
-rw-r--r--kernel/panic.c17
-rw-r--r--kernel/params.c274
-rw-r--r--kernel/rcupdate.c19
-rw-r--r--kernel/sched.c51
-rw-r--r--kernel/sched_fair.c62
-rw-r--r--kernel/sched_features.h2
-rw-r--r--kernel/sched_stats.h2
-rw-r--r--kernel/stop_machine.c120
-rw-r--r--kernel/sysctl.c10
-rw-r--r--kernel/time/tick-sched.c12
-rw-r--r--kernel/workqueue.c7
-rw-r--r--lib/bitmap.c22
-rw-r--r--mm/memcontrol.c1
-rw-r--r--mm/page_cgroup.c33
-rw-r--r--net/9p/Kconfig6
-rw-r--r--net/9p/Makefile4
-rw-r--r--net/9p/client.c1
-rw-r--r--net/9p/protocol.c33
-rw-r--r--net/9p/trans_fd.c4
-rw-r--r--net/9p/trans_rdma.c712
757 files changed, 66132 insertions, 7902 deletions
diff --git a/CREDITS b/CREDITS
index 2358846f06be..b50db1767554 100644
--- a/CREDITS
+++ b/CREDITS
@@ -598,6 +598,11 @@ S: Tamsui town, Taipei county,
598S: Taiwan 251 598S: Taiwan 251
599S: Republic of China 599S: Republic of China
600 600
601N: Reinette Chatre
602E: reinette.chatre@intel.com
603D: WiMedia Link Protocol implementation
604D: UWB stack bits and pieces
605
601N: Michael Elizabeth Chastain 606N: Michael Elizabeth Chastain
602E: mec@shout.net 607E: mec@shout.net
603D: Configure, Menuconfig, xconfig 608D: Configure, Menuconfig, xconfig
@@ -2695,6 +2700,12 @@ S: Demonstratsii 8-382
2695S: Tula 300000 2700S: Tula 300000
2696S: Russia 2701S: Russia
2697 2702
2703N: Inaky Perez-Gonzalez
2704E: inaky.perez-gonzalez@intel.com
2705D: UWB stack, HWA-RC driver and HWA-HC drivers
2706D: Wireless USB additions to the USB stack
2707D: WiMedia Link Protocol bits and pieces
2708
2698N: Gordon Peters 2709N: Gordon Peters
2699E: GordPeters@smarttech.com 2710E: GordPeters@smarttech.com
2700D: Isochronous receive for IEEE 1394 driver (OHCI module). 2711D: Isochronous receive for IEEE 1394 driver (OHCI module).
diff --git a/Documentation/ABI/testing/sysfs-bus-umc b/Documentation/ABI/testing/sysfs-bus-umc
new file mode 100644
index 000000000000..948fec412446
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-umc
@@ -0,0 +1,28 @@
1What: /sys/bus/umc/
2Date: July 2008
3KernelVersion: 2.6.27
4Contact: David Vrabel <david.vrabel@csr.com>
5Description:
6 The Wireless Host Controller Interface (WHCI)
7 specification describes a PCI-based device with
8 multiple capabilities; the UWB Multi-interface
9 Controller (UMC).
10
11 The umc bus presents each of the individual
12 capabilities as a device.
13
14What: /sys/bus/umc/devices/.../capability_id
15Date: July 2008
16KernelVersion: 2.6.27
17Contact: David Vrabel <david.vrabel@csr.com>
18Description:
19 The ID of this capability, with 0 being the radio
20 controller capability.
21
22What: /sys/bus/umc/devices/.../version
23Date: July 2008
24KernelVersion: 2.6.27
25Contact: David Vrabel <david.vrabel@csr.com>
26Description:
27 The specification version this capability's hardware
28 interface complies with.
diff --git a/Documentation/ABI/testing/sysfs-bus-usb b/Documentation/ABI/testing/sysfs-bus-usb
index df6c8a0159f1..7772928ee48f 100644
--- a/Documentation/ABI/testing/sysfs-bus-usb
+++ b/Documentation/ABI/testing/sysfs-bus-usb
@@ -101,3 +101,46 @@ Description:
101Users: 101Users:
102 USB PM tool 102 USB PM tool
103 git://git.moblin.org/users/sarah/usb-pm-tool/ 103 git://git.moblin.org/users/sarah/usb-pm-tool/
104
105What: /sys/bus/usb/device/.../authorized
106Date: July 2008
107KernelVersion: 2.6.26
108Contact: David Vrabel <david.vrabel@csr.com>
109Description:
110 Authorized devices are available for use by device
111 drivers, non-authorized ones are not. By default, wired
112 USB devices are authorized.
113
114 Certified Wireless USB devices are not authorized
115 initially and should be (by writing 1) after the
116 device has been authenticated.
117
118What: /sys/bus/usb/device/.../wusb_cdid
119Date: July 2008
120KernelVersion: 2.6.27
121Contact: David Vrabel <david.vrabel@csr.com>
122Description:
123 For Certified Wireless USB devices only.
124
125 A device's CDID, as 16 space-separated hex octets.
126
127What: /sys/bus/usb/device/.../wusb_ck
128Date: July 2008
129KernelVersion: 2.6.27
130Contact: David Vrabel <david.vrabel@csr.com>
131Description:
132 For Certified Wireless USB devices only.
133
134 Write the device's connection key (CK) to start the
135 authentication of the device. The CK is 16
136 space-separated hex octets.
137
138What: /sys/bus/usb/device/.../wusb_disconnect
139Date: July 2008
140KernelVersion: 2.6.27
141Contact: David Vrabel <david.vrabel@csr.com>
142Description:
143 For Certified Wireless USB devices only.
144
145 Write a 1 to force the device to disconnect
146 (equivalent to unplugging a wired USB device).
diff --git a/Documentation/ABI/testing/sysfs-class-usb_host b/Documentation/ABI/testing/sysfs-class-usb_host
new file mode 100644
index 000000000000..46b66ad1f1b4
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-usb_host
@@ -0,0 +1,25 @@
1What: /sys/class/usb_host/usb_hostN/wusb_chid
2Date: July 2008
3KernelVersion: 2.6.27
4Contact: David Vrabel <david.vrabel@csr.com>
5Description:
6 Write the CHID (16 space-separated hex octets) for this host controller.
7 This starts the host controller, allowing it to accept connection from
8 WUSB devices.
9
10 Set an all zero CHID to stop the host controller.
11
12What: /sys/class/usb_host/usb_hostN/wusb_trust_timeout
13Date: July 2008
14KernelVersion: 2.6.27
15Contact: David Vrabel <david.vrabel@csr.com>
16Description:
17 Devices that haven't sent a WUSB packet to the host
18 within 'wusb_trust_timeout' ms are considered to have
19 disconnected and are removed. The default value of
20 4000 ms is the value required by the WUSB
21 specification.
22
23 Since this relates to security (specifically, the
24 lifetime of PTKs and GTKs) it should not be changed
25 from the default.
diff --git a/Documentation/ABI/testing/sysfs-class-uwb_rc b/Documentation/ABI/testing/sysfs-class-uwb_rc
new file mode 100644
index 000000000000..a0d18dbeb7a9
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-uwb_rc
@@ -0,0 +1,144 @@
1What: /sys/class/uwb_rc
2Date: July 2008
3KernelVersion: 2.6.27
4Contact: linux-usb@vger.kernel.org
5Description:
6 Interfaces for WiMedia Ultra Wideband Common Radio
7 Platform (UWB) radio controllers.
8
9 Familiarity with the ECMA-368 'High Rate Ultra
10 Wideband MAC and PHY Specification' is assumed.
11
12What: /sys/class/uwb_rc/beacon_timeout_ms
13Date: July 2008
14KernelVersion: 2.6.27
15Description:
16 If no beacons are received from a device for at least
17 this time, the device will be considered to have gone
18 and it will be removed. The default is 3 superframes
19 (~197 ms) as required by the specification.
20
21What: /sys/class/uwb_rc/uwbN/
22Date: July 2008
23KernelVersion: 2.6.27
24Contact: linux-usb@vger.kernel.org
25Description:
26 An individual UWB radio controller.
27
28What: /sys/class/uwb_rc/uwbN/beacon
29Date: July 2008
30KernelVersion: 2.6.27
31Contact: linux-usb@vger.kernel.org
32Description:
33 Write:
34
35 <channel> [<bpst offset>]
36
37 to start beaconing on a specific channel, or stop
38 beaconing if <channel> is -1. Valid channels depend
39 on the radio controller's supported band groups.
40
41 <bpst offset> may be used to try and join a specific
42 beacon group if more than one was found during a scan.
43
44What: /sys/class/uwb_rc/uwbN/scan
45Date: July 2008
46KernelVersion: 2.6.27
47Contact: linux-usb@vger.kernel.org
48Description:
49 Write:
50
51 <channel> <type> [<bpst offset>]
52
53 to start (or stop) scanning on a channel. <type> is one of:
54 0 - scan
55 1 - scan outside BP
56 2 - scan while inactive
57 3 - scanning disabled
58 4 - scan (with start time of <bpst offset>)
59
60What: /sys/class/uwb_rc/uwbN/mac_address
61Date: July 2008
62KernelVersion: 2.6.27
63Contact: linux-usb@vger.kernel.org
64Description:
65 The EUI-48, in colon-separated hex octets, for this
66 radio controller. A write will change the radio
67 controller's EUI-48 but only do so while the device is
68 not beaconing or scanning.
69
70What: /sys/class/uwb_rc/uwbN/wusbhc
71Date: July 2008
72KernelVersion: 2.6.27
73Contact: linux-usb@vger.kernel.org
74Description:
75 A symlink to the device (if any) of the WUSB Host
76 Controller PAL using this radio controller.
77
78What: /sys/class/uwb_rc/uwbN/<EUI-48>/
79Date: July 2008
80KernelVersion: 2.6.27
81Contact: linux-usb@vger.kernel.org
82Description:
83 A neighbour UWB device that has either been detected
84 as part of a scan or is a member of the radio
85 controller's beacon group.
86
87What: /sys/class/uwb_rc/uwbN/<EUI-48>/BPST
88Date: July 2008
89KernelVersion: 2.6.27
90Contact: linux-usb@vger.kernel.org
91Description:
92 The time (using the radio controller's internal 1 ms
93 interval superframe timer) at which the last beacon
94 from this device was received.
95
96What: /sys/class/uwb_rc/uwbN/<EUI-48>/DevAddr
97Date: July 2008
98KernelVersion: 2.6.27
99Contact: linux-usb@vger.kernel.org
100Description:
101 The current DevAddr of this device in colon separated
102 hex octets.
103
104What: /sys/class/uwb_rc/uwbN/<EUI-48>/EUI_48
105Date: July 2008
106KernelVersion: 2.6.27
107Contact: linux-usb@vger.kernel.org
108Description:
109
110 The EUI-48 of this device in colon separated hex
111 octets.
112
113What: /sys/class/uwb_rc/uwbN/<EUI-48>/BPST
114Date: July 2008
115KernelVersion: 2.6.27
116Contact: linux-usb@vger.kernel.org
117Description:
118
119What: /sys/class/uwb_rc/uwbN/<EUI-48>/IEs
120Date: July 2008
121KernelVersion: 2.6.27
122Contact: linux-usb@vger.kernel.org
123Description:
124 The latest IEs included in this device's beacon, in
125 space separated hex octets with one IE per line.
126
127What: /sys/class/uwb_rc/uwbN/<EUI-48>/LQE
128Date: July 2008
129KernelVersion: 2.6.27
130Contact: linux-usb@vger.kernel.org
131Description:
132 Link Quality Estimate - the Signal to Noise Ratio
133 (SNR) of all packets received from this device in dB.
134 This gives an estimate on a suitable PHY rate. Refer
135 to [ECMA-368] section 13.3 for more details.
136
137What: /sys/class/uwb_rc/uwbN/<EUI-48>/RSSI
138Date: July 2008
139KernelVersion: 2.6.27
140Contact: linux-usb@vger.kernel.org
141Description:
142 Received Signal Strength Indication - the strength of
143 the received signal in dB. LQE is a more useful
144 measure of the radio link quality.
diff --git a/Documentation/ABI/testing/sysfs-wusb_cbaf b/Documentation/ABI/testing/sysfs-wusb_cbaf
new file mode 100644
index 000000000000..a99c5f86a37a
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-wusb_cbaf
@@ -0,0 +1,100 @@
1What: /sys/bus/usb/drivers/wusb_cbaf/.../wusb_*
2Date: August 2008
3KernelVersion: 2.6.27
4Contact: David Vrabel <david.vrabel@csr.com>
5Description:
6 Various files for managing Cable Based Association of
7 (wireless) USB devices.
8
9 The sequence of operations should be:
10
11 1. Device is plugged in.
12
13 2. The connection manager (CM) sees a device with CBA capability.
14 (the wusb_chid etc. files in /sys/devices/blah/OURDEVICE).
15
16 3. The CM writes the host name, supported band groups,
17 and the CHID (host ID) into the wusb_host_name,
18 wusb_host_band_groups and wusb_chid files. These
19 get sent to the device and the CDID (if any) for
20 this host is requested.
21
22 4. The CM can verify that the device's supported band
23 groups (wusb_device_band_groups) are compatible
24 with the host.
25
26 5. The CM reads the wusb_cdid file.
27
28 6. The CM looks it up in its database.
29
30 - If it has a matching CHID,CDID entry, the device
31 has been authorized before and nothing further
32 needs to be done.
33
34 - If the CDID is zero (or the CM doesn't find a
35 matching CDID in its database), the device is
36 assumed to be not known. The CM may associate
37 the host with device by: writing a randomly
38 generated CDID to wusb_cdid and then a random CK
39 to wusb_ck (this uploads the new CC to the
40 device).
41
42 The CM may choose to prompt the user before
43 associating with a new device.
44
45 7. Device is unplugged.
46
47 References:
48 [WUSB-AM] Association Models Supplement to the
49 Certified Wireless Universal Serial Bus
50 Specification, version 1.0.
51
52What: /sys/bus/usb/drivers/wusb_cbaf/.../wusb_chid
53Date: August 2008
54KernelVersion: 2.6.27
55Contact: David Vrabel <david.vrabel@csr.com>
56Description:
57 The CHID of the host formatted as 16 space-separated
58 hex octets.
59
60 A write fetches the device's supported band groups and
61 the CDID for any existing association with this host.
62
63What: /sys/bus/usb/drivers/wusb_cbaf/.../wusb_host_name
64Date: August 2008
65KernelVersion: 2.6.27
66Contact: David Vrabel <david.vrabel@csr.com>
67Description:
68 A friendly name for the host as a UTF-8 encoded string.
69
70What: /sys/bus/usb/drivers/wusb_cbaf/.../wusb_host_band_groups
71Date: August 2008
72KernelVersion: 2.6.27
73Contact: David Vrabel <david.vrabel@csr.com>
74Description:
75 The band groups supported by the host, in the format
76 defined in [WUSB-AM].
77
78What: /sys/bus/usb/drivers/wusb_cbaf/.../wusb_device_band_groups
79Date: August 2008
80KernelVersion: 2.6.27
81Contact: David Vrabel <david.vrabel@csr.com>
82Description:
83 The band groups supported by the device, in the format
84 defined in [WUSB-AM].
85
86What: /sys/bus/usb/drivers/wusb_cbaf/.../wusb_cdid
87Date: August 2008
88KernelVersion: 2.6.27
89Contact: David Vrabel <david.vrabel@csr.com>
90Description:
91 The device's CDID formatted as 16 space-separated hex
92 octets.
93
94What: /sys/bus/usb/drivers/wusb_cbaf/.../wusb_ck
95Date: August 2008
96KernelVersion: 2.6.27
97Contact: David Vrabel <david.vrabel@csr.com>
98Description:
99 Write 16 space-separated random, hex octets to
100 associate with the device.
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index f5f812daf9f4..05d71b4b9430 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -359,3 +359,11 @@ Why: The 2.6 kernel supports direct writing to ide CD drives, which
359 eliminates the need for ide-scsi. The new method is more 359 eliminates the need for ide-scsi. The new method is more
360 efficient in every way. 360 efficient in every way.
361Who: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp> 361Who: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
362
363---------------------------
364
365What: i2c_attach_client(), i2c_detach_client(), i2c_driver->detach_client()
366When: 2.6.29 (ideally) or 2.6.30 (more likely)
367Why: Deprecated by the new (standard) device driver binding model. Use
368 i2c_driver->probe() and ->remove() instead.
369Who: Jean Delvare <khali@linux-fr.org>
diff --git a/Documentation/i2c/busses/i2c-i801 b/Documentation/i2c/busses/i2c-i801
index c31e0291e167..81c0c59a60ea 100644
--- a/Documentation/i2c/busses/i2c-i801
+++ b/Documentation/i2c/busses/i2c-i801
@@ -13,8 +13,9 @@ Supported adapters:
13 * Intel 631xESB/632xESB (ESB2) 13 * Intel 631xESB/632xESB (ESB2)
14 * Intel 82801H (ICH8) 14 * Intel 82801H (ICH8)
15 * Intel 82801I (ICH9) 15 * Intel 82801I (ICH9)
16 * Intel Tolapai 16 * Intel EP80579 (Tolapai)
17 * Intel ICH10 17 * Intel 82801JI (ICH10)
18 * Intel PCH
18 Datasheets: Publicly available at the Intel website 19 Datasheets: Publicly available at the Intel website
19 20
20Authors: 21Authors:
@@ -32,7 +33,7 @@ Description
32----------- 33-----------
33 34
34The ICH (properly known as the 82801AA), ICH0 (82801AB), ICH2 (82801BA), 35The ICH (properly known as the 82801AA), ICH0 (82801AB), ICH2 (82801BA),
35ICH3 (82801CA/CAM) and later devices are Intel chips that are a part of 36ICH3 (82801CA/CAM) and later devices (PCH) are Intel chips that are a part of
36Intel's '810' chipset for Celeron-based PCs, '810E' chipset for 37Intel's '810' chipset for Celeron-based PCs, '810E' chipset for
37Pentium-based PCs, '815E' chipset, and others. 38Pentium-based PCs, '815E' chipset, and others.
38 39
diff --git a/Documentation/i2c/porting-clients b/Documentation/i2c/porting-clients
deleted file mode 100644
index 7bf82c08f6ca..000000000000
--- a/Documentation/i2c/porting-clients
+++ /dev/null
@@ -1,160 +0,0 @@
1Revision 7, 2007-04-19
2Jean Delvare <khali@linux-fr.org>
3Greg KH <greg@kroah.com>
4
5This is a guide on how to convert I2C chip drivers from Linux 2.4 to
6Linux 2.6. I have been using existing drivers (lm75, lm78) as examples.
7Then I converted a driver myself (lm83) and updated this document.
8Note that this guide is strongly oriented towards hardware monitoring
9drivers. Many points are still valid for other type of drivers, but
10others may be irrelevant.
11
12There are two sets of points below. The first set concerns technical
13changes. The second set concerns coding policy. Both are mandatory.
14
15Although reading this guide will help you porting drivers, I suggest
16you keep an eye on an already ported driver while porting your own
17driver. This will help you a lot understanding what this guide
18exactly means. Choose the chip driver that is the more similar to
19yours for best results.
20
21Technical changes:
22
23* [Driver type] Any driver that was relying on i2c-isa has to be
24 converted to a proper isa, platform or pci driver. This is not
25 covered by this guide.
26
27* [Includes] Get rid of "version.h" and <linux/i2c-proc.h>.
28 Includes typically look like that:
29 #include <linux/module.h>
30 #include <linux/init.h>
31 #include <linux/slab.h>
32 #include <linux/jiffies.h>
33 #include <linux/i2c.h>
34 #include <linux/hwmon.h> /* for hardware monitoring drivers */
35 #include <linux/hwmon-sysfs.h>
36 #include <linux/hwmon-vid.h> /* if you need VRM support */
37 #include <linux/err.h> /* for class registration */
38 Please respect this inclusion order. Some extra headers may be
39 required for a given driver (e.g. "lm75.h").
40
41* [Addresses] SENSORS_I2C_END becomes I2C_CLIENT_END, ISA addresses
42 are no more handled by the i2c core. Address ranges are no more
43 supported either, define each individual address separately.
44 SENSORS_INSMOD_<n> becomes I2C_CLIENT_INSMOD_<n>.
45
46* [Client data] Get rid of sysctl_id. Try using standard names for
47 register values (for example, temp_os becomes temp_max). You're
48 still relatively free here, but you *have* to follow the standard
49 names for sysfs files (see the Sysctl section below).
50
51* [Function prototypes] The detect functions loses its flags
52 parameter. Sysctl (e.g. lm75_temp) and miscellaneous functions
53 are off the list of prototypes. This usually leaves five
54 prototypes:
55 static int lm75_attach_adapter(struct i2c_adapter *adapter);
56 static int lm75_detect(struct i2c_adapter *adapter, int address,
57 int kind);
58 static void lm75_init_client(struct i2c_client *client);
59 static int lm75_detach_client(struct i2c_client *client);
60 static struct lm75_data lm75_update_device(struct device *dev);
61
62* [Sysctl] All sysctl stuff is of course gone (defines, ctl_table
63 and functions). Instead, you have to define show and set functions for
64 each sysfs file. Only define set for writable values. Take a look at an
65 existing 2.6 driver for details (it87 for example). Don't forget
66 to define the attributes for each file (this is that step that
67 links callback functions). Use the file names specified in
68 Documentation/hwmon/sysfs-interface for the individual files. Also
69 convert the units these files read and write to the specified ones.
70 If you need to add a new type of file, please discuss it on the
71 sensors mailing list <lm-sensors@lm-sensors.org> by providing a
72 patch to the Documentation/hwmon/sysfs-interface file.
73
74* [Attach] The attach function should make sure that the adapter's
75 class has I2C_CLASS_HWMON (or whatever class is suitable for your
76 driver), using the following construct:
77 if (!(adapter->class & I2C_CLASS_HWMON))
78 return 0;
79 Call i2c_probe() instead of i2c_detect().
80
81* [Detect] As mentioned earlier, the flags parameter is gone.
82 The type_name and client_name strings are replaced by a single
83 name string, which will be filled with a lowercase, short string.
84 The labels used for error paths are reduced to the number needed.
85 It is advised that the labels are given descriptive names such as
86 exit and exit_free. Don't forget to properly set err before
87 jumping to error labels. By the way, labels should be left-aligned.
88 Use kzalloc instead of kmalloc.
89 Use i2c_set_clientdata to set the client data (as opposed to
90 a direct access to client->data).
91 Use strlcpy instead of strcpy or snprintf to copy the client name.
92 Replace the sysctl directory registration by calls to
93 device_create_file. Move the driver initialization before any
94 sysfs file creation.
95 Register the client with the hwmon class (using hwmon_device_register)
96 if applicable.
97 Drop client->id.
98 Drop any 24RF08 corruption prevention you find, as this is now done
99 at the i2c-core level, and doing it twice voids it.
100 Don't add I2C_CLIENT_ALLOW_USE to client->flags, it's the default now.
101
102* [Init] Limits must not be set by the driver (can be done later in
103 user-space). Chip should not be reset default (although a module
104 parameter may be used to force it), and initialization should be
105 limited to the strictly necessary steps.
106
107* [Detach] Remove the call to i2c_deregister_entry. Do not log an
108 error message if i2c_detach_client fails, as i2c-core will now do
109 it for you.
110 Unregister from the hwmon class if applicable.
111
112* [Update] The function prototype changed, it is now
113 passed a device structure, which you have to convert to a client
114 using to_i2c_client(dev). The update function should return a
115 pointer to the client data.
116 Don't access client->data directly, use i2c_get_clientdata(client)
117 instead.
118 Use time_after() instead of direct jiffies comparison.
119
120* [Interface] Make sure there is a MODULE_LICENSE() line, at the bottom
121 of the file (after MODULE_AUTHOR() and MODULE_DESCRIPTION(), in this
122 order).
123
124* [Driver] The flags field of the i2c_driver structure is gone.
125 I2C_DF_NOTIFY is now the default behavior.
126 The i2c_driver structure has a driver member, which is itself a
127 structure, those name member should be initialized to a driver name
128 string. i2c_driver itself has no name member anymore.
129
130* [Driver model] Instead of shutdown or reboot notifiers, provide a
131 shutdown() method in your driver.
132
133* [Power management] Use the driver model suspend() and resume()
134 callbacks instead of the obsolete pm_register() calls.
135
136Coding policy:
137
138* [Copyright] Use (C), not (c), for copyright.
139
140* [Debug/log] Get rid of #ifdef DEBUG/#endif constructs whenever you
141 can. Calls to printk for debugging purposes are replaced by calls to
142 dev_dbg where possible, else to pr_debug. Here is an example of how
143 to call it (taken from lm75_detect):
144 dev_dbg(&client->dev, "Starting lm75 update\n");
145 Replace other printk calls with the dev_info, dev_err or dev_warn
146 function, as appropriate.
147
148* [Constants] Constants defines (registers, conversions) should be
149 aligned. This greatly improves readability.
150 Alignments are achieved by the means of tabs, not spaces. Remember
151 that tabs are set to 8 in the Linux kernel code.
152
153* [Layout] Avoid extra empty lines between comments and what they
154 comment. Respect the coding style (see Documentation/CodingStyle),
155 in particular when it comes to placing curly braces.
156
157* [Comments] Make sure that no comment refers to a file that isn't
158 part of the Linux source tree (typically doc/chips/<chip name>),
159 and that remaining comments still match the code. Merging comment
160 lines when possible is encouraged.
diff --git a/Documentation/i2c/writing-clients b/Documentation/i2c/writing-clients
index d73ee117a8ca..6b9af7d479c2 100644
--- a/Documentation/i2c/writing-clients
+++ b/Documentation/i2c/writing-clients
@@ -10,23 +10,21 @@ General remarks
10=============== 10===============
11 11
12Try to keep the kernel namespace as clean as possible. The best way to 12Try to keep the kernel namespace as clean as possible. The best way to
13do this is to use a unique prefix for all global symbols. This is 13do this is to use a unique prefix for all global symbols. This is
14especially important for exported symbols, but it is a good idea to do 14especially important for exported symbols, but it is a good idea to do
15it for non-exported symbols too. We will use the prefix `foo_' in this 15it for non-exported symbols too. We will use the prefix `foo_' in this
16tutorial, and `FOO_' for preprocessor variables. 16tutorial.
17 17
18 18
19The driver structure 19The driver structure
20==================== 20====================
21 21
22Usually, you will implement a single driver structure, and instantiate 22Usually, you will implement a single driver structure, and instantiate
23all clients from it. Remember, a driver structure contains general access 23all clients from it. Remember, a driver structure contains general access
24routines, and should be zero-initialized except for fields with data you 24routines, and should be zero-initialized except for fields with data you
25provide. A client structure holds device-specific information like the 25provide. A client structure holds device-specific information like the
26driver model device node, and its I2C address. 26driver model device node, and its I2C address.
27 27
28/* iff driver uses driver model ("new style") binding model: */
29
30static struct i2c_device_id foo_idtable[] = { 28static struct i2c_device_id foo_idtable[] = {
31 { "foo", my_id_for_foo }, 29 { "foo", my_id_for_foo },
32 { "bar", my_id_for_bar }, 30 { "bar", my_id_for_bar },
@@ -40,7 +38,6 @@ static struct i2c_driver foo_driver = {
40 .name = "foo", 38 .name = "foo",
41 }, 39 },
42 40
43 /* iff driver uses driver model ("new style") binding model: */
44 .id_table = foo_ids, 41 .id_table = foo_ids,
45 .probe = foo_probe, 42 .probe = foo_probe,
46 .remove = foo_remove, 43 .remove = foo_remove,
@@ -49,24 +46,19 @@ static struct i2c_driver foo_driver = {
49 .detect = foo_detect, 46 .detect = foo_detect,
50 .address_data = &addr_data, 47 .address_data = &addr_data,
51 48
52 /* else, driver uses "legacy" binding model: */
53 .attach_adapter = foo_attach_adapter,
54 .detach_client = foo_detach_client,
55
56 /* these may be used regardless of the driver binding model */
57 .shutdown = foo_shutdown, /* optional */ 49 .shutdown = foo_shutdown, /* optional */
58 .suspend = foo_suspend, /* optional */ 50 .suspend = foo_suspend, /* optional */
59 .resume = foo_resume, /* optional */ 51 .resume = foo_resume, /* optional */
60 .command = foo_command, /* optional */ 52 .command = foo_command, /* optional, deprecated */
61} 53}
62 54
63The name field is the driver name, and must not contain spaces. It 55The name field is the driver name, and must not contain spaces. It
64should match the module name (if the driver can be compiled as a module), 56should match the module name (if the driver can be compiled as a module),
65although you can use MODULE_ALIAS (passing "foo" in this example) to add 57although you can use MODULE_ALIAS (passing "foo" in this example) to add
66another name for the module. If the driver name doesn't match the module 58another name for the module. If the driver name doesn't match the module
67name, the module won't be automatically loaded (hotplug/coldplug). 59name, the module won't be automatically loaded (hotplug/coldplug).
68 60
69All other fields are for call-back functions which will be explained 61All other fields are for call-back functions which will be explained
70below. 62below.
71 63
72 64
@@ -74,34 +66,13 @@ Extra client data
74================= 66=================
75 67
76Each client structure has a special `data' field that can point to any 68Each client structure has a special `data' field that can point to any
77structure at all. You should use this to keep device-specific data, 69structure at all. You should use this to keep device-specific data.
78especially in drivers that handle multiple I2C or SMBUS devices. You
79do not always need this, but especially for `sensors' drivers, it can
80be very useful.
81 70
82 /* store the value */ 71 /* store the value */
83 void i2c_set_clientdata(struct i2c_client *client, void *data); 72 void i2c_set_clientdata(struct i2c_client *client, void *data);
84 73
85 /* retrieve the value */ 74 /* retrieve the value */
86 void *i2c_get_clientdata(struct i2c_client *client); 75 void *i2c_get_clientdata(const struct i2c_client *client);
87
88An example structure is below.
89
90 struct foo_data {
91 struct i2c_client client;
92 enum chips type; /* To keep the chips type for `sensors' drivers. */
93
94 /* Because the i2c bus is slow, it is often useful to cache the read
95 information of a chip for some time (for example, 1 or 2 seconds).
96 It depends of course on the device whether this is really worthwhile
97 or even sensible. */
98 struct mutex update_lock; /* When we are reading lots of information,
99 another process should not update the
100 below information */
101 char valid; /* != 0 if the following fields are valid. */
102 unsigned long last_updated; /* In jiffies */
103 /* Add the read information here too */
104 };
105 76
106 77
107Accessing the client 78Accessing the client
@@ -109,11 +80,9 @@ Accessing the client
109 80
110Let's say we have a valid client structure. At some time, we will need 81Let's say we have a valid client structure. At some time, we will need
111to gather information from the client, or write new information to the 82to gather information from the client, or write new information to the
112client. How we will export this information to user-space is less 83client.
113important at this moment (perhaps we do not need to do this at all for
114some obscure clients). But we need generic reading and writing routines.
115 84
116I have found it useful to define foo_read and foo_write function for this. 85I have found it useful to define foo_read and foo_write functions for this.
117For some cases, it will be easier to call the i2c functions directly, 86For some cases, it will be easier to call the i2c functions directly,
118but many chips have some kind of register-value idea that can easily 87but many chips have some kind of register-value idea that can easily
119be encapsulated. 88be encapsulated.
@@ -121,33 +90,33 @@ be encapsulated.
121The below functions are simple examples, and should not be copied 90The below functions are simple examples, and should not be copied
122literally. 91literally.
123 92
124 int foo_read_value(struct i2c_client *client, u8 reg) 93int foo_read_value(struct i2c_client *client, u8 reg)
125 { 94{
126 if (reg < 0x10) /* byte-sized register */ 95 if (reg < 0x10) /* byte-sized register */
127 return i2c_smbus_read_byte_data(client,reg); 96 return i2c_smbus_read_byte_data(client, reg);
128 else /* word-sized register */ 97 else /* word-sized register */
129 return i2c_smbus_read_word_data(client,reg); 98 return i2c_smbus_read_word_data(client, reg);
130 } 99}
131 100
132 int foo_write_value(struct i2c_client *client, u8 reg, u16 value) 101int foo_write_value(struct i2c_client *client, u8 reg, u16 value)
133 { 102{
134 if (reg == 0x10) /* Impossible to write - driver error! */ { 103 if (reg == 0x10) /* Impossible to write - driver error! */
135 return -1; 104 return -EINVAL;
136 else if (reg < 0x10) /* byte-sized register */ 105 else if (reg < 0x10) /* byte-sized register */
137 return i2c_smbus_write_byte_data(client,reg,value); 106 return i2c_smbus_write_byte_data(client, reg, value);
138 else /* word-sized register */ 107 else /* word-sized register */
139 return i2c_smbus_write_word_data(client,reg,value); 108 return i2c_smbus_write_word_data(client, reg, value);
140 } 109}
141 110
142 111
143Probing and attaching 112Probing and attaching
144===================== 113=====================
145 114
146The Linux I2C stack was originally written to support access to hardware 115The Linux I2C stack was originally written to support access to hardware
147monitoring chips on PC motherboards, and thus it embeds some assumptions 116monitoring chips on PC motherboards, and thus used to embed some assumptions
148that are more appropriate to SMBus (and PCs) than to I2C. One of these 117that were more appropriate to SMBus (and PCs) than to I2C. One of these
149assumptions is that most adapters and devices drivers support the SMBUS_QUICK 118assumptions was that most adapters and devices drivers support the SMBUS_QUICK
150protocol to probe device presence. Another is that devices and their drivers 119protocol to probe device presence. Another was that devices and their drivers
151can be sufficiently configured using only such probe primitives. 120can be sufficiently configured using only such probe primitives.
152 121
153As Linux and its I2C stack became more widely used in embedded systems 122As Linux and its I2C stack became more widely used in embedded systems
@@ -164,6 +133,9 @@ since the "legacy" model requires drivers to create "i2c_client" device
164objects after SMBus style probing, while the Linux driver model expects 133objects after SMBus style probing, while the Linux driver model expects
165drivers to be given such device objects in their probe() routines. 134drivers to be given such device objects in their probe() routines.
166 135
136The legacy model is deprecated now and will soon be removed, so we no
137longer document it here.
138
167 139
168Standard Driver Model Binding ("New Style") 140Standard Driver Model Binding ("New Style")
169------------------------------------------- 141-------------------------------------------
@@ -193,8 +165,8 @@ matches the device's name. It is passed the entry that was matched so
193the driver knows which one in the table matched. 165the driver knows which one in the table matched.
194 166
195 167
196Device Creation (Standard driver model) 168Device Creation
197--------------------------------------- 169---------------
198 170
199If you know for a fact that an I2C device is connected to a given I2C bus, 171If you know for a fact that an I2C device is connected to a given I2C bus,
200you can instantiate that device by simply filling an i2c_board_info 172you can instantiate that device by simply filling an i2c_board_info
@@ -221,8 +193,8 @@ in the I2C bus driver. You may want to save the returned i2c_client
221reference for later use. 193reference for later use.
222 194
223 195
224Device Detection (Standard driver model) 196Device Detection
225---------------------------------------- 197----------------
226 198
227Sometimes you do not know in advance which I2C devices are connected to 199Sometimes you do not know in advance which I2C devices are connected to
228a given I2C bus. This is for example the case of hardware monitoring 200a given I2C bus. This is for example the case of hardware monitoring
@@ -246,8 +218,8 @@ otherwise misdetections are likely to occur and things can get wrong
246quickly. 218quickly.
247 219
248 220
249Device Deletion (Standard driver model) 221Device Deletion
250--------------------------------------- 222---------------
251 223
252Each I2C device which has been created using i2c_new_device() or 224Each I2C device which has been created using i2c_new_device() or
253i2c_new_probed_device() can be unregistered by calling 225i2c_new_probed_device() can be unregistered by calling
@@ -256,264 +228,37 @@ called automatically before the underlying I2C bus itself is removed, as a
256device can't survive its parent in the device driver model. 228device can't survive its parent in the device driver model.
257 229
258 230
259Legacy Driver Binding Model 231Initializing the driver
260--------------------------- 232=======================
233
234When the kernel is booted, or when your foo driver module is inserted,
235you have to do some initializing. Fortunately, just registering the
236driver module is usually enough.
261 237
262Most i2c devices can be present on several i2c addresses; for some this 238static int __init foo_init(void)
263is determined in hardware (by soldering some chip pins to Vcc or Ground), 239{
264for others this can be changed in software (by writing to specific client 240 return i2c_add_driver(&foo_driver);
265registers). Some devices are usually on a specific address, but not always; 241}
266and some are even more tricky. So you will probably need to scan several 242
267i2c addresses for your clients, and do some sort of detection to see 243static void __exit foo_cleanup(void)
268whether it is actually a device supported by your driver. 244{
245 i2c_del_driver(&foo_driver);
246}
247
248/* Substitute your own name and email address */
 249MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl>");
250MODULE_DESCRIPTION("Driver for Barf Inc. Foo I2C devices");
269 251
270To give the user a maximum of possibilities, some default module parameters 252/* a few non-GPL license types are also allowed */
271are defined to help determine what addresses are scanned. Several macros 253MODULE_LICENSE("GPL");
272are defined in i2c.h to help you support them, as well as a generic 254
273detection algorithm. 255module_init(foo_init);
274 256module_exit(foo_cleanup);
275You do not have to use this parameter interface; but don't try to use 257
276function i2c_probe() if you don't. 258Note that some functions are marked by `__init'. These functions can
277 259be removed after kernel booting (or module loading) is completed.
278 260Likewise, functions marked by `__exit' are dropped by the compiler when
279Probing classes (Legacy model) 261the code is built into the kernel, as they would never be called.
280------------------------------
281
282All parameters are given as lists of unsigned 16-bit integers. Lists are
283terminated by I2C_CLIENT_END.
284The following lists are used internally:
285
286 normal_i2c: filled in by the module writer.
287 A list of I2C addresses which should normally be examined.
288 probe: insmod parameter.
289 A list of pairs. The first value is a bus number (-1 for any I2C bus),
290 the second is the address. These addresses are also probed, as if they
291 were in the 'normal' list.
292 ignore: insmod parameter.
293 A list of pairs. The first value is a bus number (-1 for any I2C bus),
294 the second is the I2C address. These addresses are never probed.
295 This parameter overrules the 'normal_i2c' list only.
296 force: insmod parameter.
297 A list of pairs. The first value is a bus number (-1 for any I2C bus),
298 the second is the I2C address. A device is blindly assumed to be on
299 the given address, no probing is done.
300
301Additionally, kind-specific force lists may optionally be defined if
302the driver supports several chip kinds. They are grouped in a
 303 NULL-terminated list of pointers named forces, whose first element is the
 304 generic force list mentioned above. Each additional list corresponds to an
305insmod parameter of the form force_<kind>.
306
307Fortunately, as a module writer, you just have to define the `normal_i2c'
308parameter. The complete declaration could look like this:
309
310 /* Scan 0x4c to 0x4f */
311 static const unsigned short normal_i2c[] = { 0x4c, 0x4d, 0x4e, 0x4f,
312 I2C_CLIENT_END };
313
314 /* Magic definition of all other variables and things */
315 I2C_CLIENT_INSMOD;
316 /* Or, if your driver supports, say, 2 kind of devices: */
317 I2C_CLIENT_INSMOD_2(foo, bar);
318
319If you use the multi-kind form, an enum will be defined for you:
320 enum chips { any_chip, foo, bar, ... }
321You can then (and certainly should) use it in the driver code.
322
323Note that you *have* to call the defined variable `normal_i2c',
324without any prefix!
325
326
327Attaching to an adapter (Legacy model)
328--------------------------------------
329
330Whenever a new adapter is inserted, or for all adapters if the driver is
331being registered, the callback attach_adapter() is called. Now is the
332time to determine what devices are present on the adapter, and to register
333a client for each of them.
334
335The attach_adapter callback is really easy: we just call the generic
336detection function. This function will scan the bus for us, using the
337information as defined in the lists explained above. If a device is
338detected at a specific address, another callback is called.
339
340 int foo_attach_adapter(struct i2c_adapter *adapter)
341 {
342 return i2c_probe(adapter,&addr_data,&foo_detect_client);
343 }
344
345Remember, structure `addr_data' is defined by the macros explained above,
346so you do not have to define it yourself.
347
348The i2c_probe function will call the foo_detect_client
349function only for those i2c addresses that actually have a device on
350them (unless a `force' parameter was used). In addition, addresses that
351are already in use (by some other registered client) are skipped.
352
353
354The detect client function (Legacy model)
355-----------------------------------------
356
357The detect client function is called by i2c_probe. The `kind' parameter
358contains -1 for a probed detection, 0 for a forced detection, or a positive
359number for a forced detection with a chip type forced.
360
361Returning an error different from -ENODEV in a detect function will cause
362the detection to stop: other addresses and adapters won't be scanned.
363This should only be done on fatal or internal errors, such as a memory
364shortage or i2c_attach_client failing.
365
366For now, you can ignore the `flags' parameter. It is there for future use.
367
368 int foo_detect_client(struct i2c_adapter *adapter, int address,
369 int kind)
370 {
371 int err = 0;
372 int i;
373 struct i2c_client *client;
374 struct foo_data *data;
375 const char *name = "";
376
377 /* Let's see whether this adapter can support what we need.
378 Please substitute the things you need here! */
379 if (!i2c_check_functionality(adapter,I2C_FUNC_SMBUS_WORD_DATA |
380 I2C_FUNC_SMBUS_WRITE_BYTE))
381 goto ERROR0;
382
383 /* OK. For now, we presume we have a valid client. We now create the
384 client structure, even though we cannot fill it completely yet.
385 But it allows us to access several i2c functions safely */
386
387 if (!(data = kzalloc(sizeof(struct foo_data), GFP_KERNEL))) {
388 err = -ENOMEM;
389 goto ERROR0;
390 }
391
392 client = &data->client;
393 i2c_set_clientdata(client, data);
394
395 client->addr = address;
396 client->adapter = adapter;
397 client->driver = &foo_driver;
398
399 /* Now, we do the remaining detection. If no `force' parameter is used. */
400
401 /* First, the generic detection (if any), that is skipped if any force
402 parameter was used. */
403 if (kind < 0) {
404 /* The below is of course bogus */
405 if (foo_read(client, FOO_REG_GENERIC) != FOO_GENERIC_VALUE)
406 goto ERROR1;
407 }
408
409 /* Next, specific detection. This is especially important for `sensors'
410 devices. */
411
412 /* Determine the chip type. Not needed if a `force_CHIPTYPE' parameter
413 was used. */
414 if (kind <= 0) {
415 i = foo_read(client, FOO_REG_CHIPTYPE);
416 if (i == FOO_TYPE_1)
417 kind = chip1; /* As defined in the enum */
418 else if (i == FOO_TYPE_2)
419 kind = chip2;
420 else {
421 printk("foo: Ignoring 'force' parameter for unknown chip at "
422 "adapter %d, address 0x%02x\n",i2c_adapter_id(adapter),address);
423 goto ERROR1;
424 }
425 }
426
427 /* Now set the type and chip names */
428 if (kind == chip1) {
429 name = "chip1";
430 } else if (kind == chip2) {
431 name = "chip2";
432 }
433
434 /* Fill in the remaining client fields. */
435 strlcpy(client->name, name, I2C_NAME_SIZE);
436 data->type = kind;
437 mutex_init(&data->update_lock); /* Only if you use this field */
438
439 /* Any other initializations in data must be done here too. */
440
441 /* This function can write default values to the client registers, if
442 needed. */
443 foo_init_client(client);
444
445 /* Tell the i2c layer a new client has arrived */
446 if ((err = i2c_attach_client(client)))
447 goto ERROR1;
448
449 return 0;
450
451 /* OK, this is not exactly good programming practice, usually. But it is
452 very code-efficient in this case. */
453
454 ERROR1:
455 kfree(data);
456 ERROR0:
457 return err;
458 }
459
460
461Removing the client (Legacy model)
462==================================
463
464The detach_client call back function is called when a client should be
465removed. It may actually fail, but only when panicking. This code is
466much simpler than the attachment code, fortunately!
467
468 int foo_detach_client(struct i2c_client *client)
469 {
470 int err;
471
472 /* Try to detach the client from i2c space */
473 if ((err = i2c_detach_client(client)))
474 return err;
475
476 kfree(i2c_get_clientdata(client));
477 return 0;
478 }
479
480
481Initializing the module or kernel
482=================================
483
484When the kernel is booted, or when your foo driver module is inserted,
485you have to do some initializing. Fortunately, just attaching (registering)
486the driver module is usually enough.
487
488 static int __init foo_init(void)
489 {
490 int res;
491
492 if ((res = i2c_add_driver(&foo_driver))) {
493 printk("foo: Driver registration failed, module not inserted.\n");
494 return res;
495 }
496 return 0;
497 }
498
499 static void __exit foo_cleanup(void)
500 {
501 i2c_del_driver(&foo_driver);
502 }
503
504 /* Substitute your own name and email address */
 505 MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl>");
506 MODULE_DESCRIPTION("Driver for Barf Inc. Foo I2C devices");
507
508 /* a few non-GPL license types are also allowed */
509 MODULE_LICENSE("GPL");
510
511 module_init(foo_init);
512 module_exit(foo_cleanup);
513
514Note that some functions are marked by `__init', and some data structures
515by `__initdata'. These functions and structures can be removed after
516kernel booting (or module loading) is completed.
517 262
518 263
519Power Management 264Power Management
@@ -548,33 +293,35 @@ Command function
548 293
549A generic ioctl-like function call back is supported. You will seldom 294A generic ioctl-like function call back is supported. You will seldom
550need this, and its use is deprecated anyway, so newer design should not 295need this, and its use is deprecated anyway, so newer design should not
551use it. Set it to NULL. 296use it.
552 297
553 298
554Sending and receiving 299Sending and receiving
555===================== 300=====================
556 301
557If you want to communicate with your device, there are several functions 302If you want to communicate with your device, there are several functions
558to do this. You can find all of them in i2c.h. 303to do this. You can find all of them in <linux/i2c.h>.
559 304
560If you can choose between plain i2c communication and SMBus level 305If you can choose between plain I2C communication and SMBus level
561communication, please use the last. All adapters understand SMBus level 306communication, please use the latter. All adapters understand SMBus level
562commands, but only some of them understand plain i2c! 307commands, but only some of them understand plain I2C!
563 308
564 309
565Plain i2c communication 310Plain I2C communication
566----------------------- 311-----------------------
567 312
568 extern int i2c_master_send(struct i2c_client *,const char* ,int); 313 int i2c_master_send(struct i2c_client *client, const char *buf,
569 extern int i2c_master_recv(struct i2c_client *,char* ,int); 314 int count);
315 int i2c_master_recv(struct i2c_client *client, char *buf, int count);
570 316
571These routines read and write some bytes from/to a client. The client 317These routines read and write some bytes from/to a client. The client
572contains the i2c address, so you do not have to include it. The second 318contains the i2c address, so you do not have to include it. The second
573parameter contains the bytes the read/write, the third the length of the 319parameter contains the bytes to read/write, the third the number of bytes
574buffer. Returned is the actual number of bytes read/written. 320to read/write (must be less than the length of the buffer.) Returned is
575 321the actual number of bytes read/written.
576 extern int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msg, 322
577 int num); 323 int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msg,
324 int num);
578 325
579This sends a series of messages. Each message can be a read or write, 326This sends a series of messages. Each message can be a read or write,
580and they can be mixed in any way. The transactions are combined: no 327and they can be mixed in any way. The transactions are combined: no
@@ -583,49 +330,45 @@ for each message the client address, the number of bytes of the message
583and the message data itself. 330and the message data itself.
584 331
585You can read the file `i2c-protocol' for more information about the 332You can read the file `i2c-protocol' for more information about the
586actual i2c protocol. 333actual I2C protocol.
587 334
588 335
589SMBus communication 336SMBus communication
590------------------- 337-------------------
591 338
592 extern s32 i2c_smbus_xfer (struct i2c_adapter * adapter, u16 addr, 339 s32 i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
593 unsigned short flags, 340 unsigned short flags, char read_write, u8 command,
594 char read_write, u8 command, int size, 341 int size, union i2c_smbus_data *data);
595 union i2c_smbus_data * data); 342
596 343This is the generic SMBus function. All functions below are implemented
597 This is the generic SMBus function. All functions below are implemented 344in terms of it. Never use this function directly!
598 in terms of it. Never use this function directly! 345
599 346 s32 i2c_smbus_read_byte(struct i2c_client *client);
600 347 s32 i2c_smbus_write_byte(struct i2c_client *client, u8 value);
601 extern s32 i2c_smbus_read_byte(struct i2c_client * client); 348 s32 i2c_smbus_read_byte_data(struct i2c_client *client, u8 command);
602 extern s32 i2c_smbus_write_byte(struct i2c_client * client, u8 value); 349 s32 i2c_smbus_write_byte_data(struct i2c_client *client,
603 extern s32 i2c_smbus_read_byte_data(struct i2c_client * client, u8 command); 350 u8 command, u8 value);
604 extern s32 i2c_smbus_write_byte_data(struct i2c_client * client, 351 s32 i2c_smbus_read_word_data(struct i2c_client *client, u8 command);
605 u8 command, u8 value); 352 s32 i2c_smbus_write_word_data(struct i2c_client *client,
606 extern s32 i2c_smbus_read_word_data(struct i2c_client * client, u8 command); 353 u8 command, u16 value);
607 extern s32 i2c_smbus_write_word_data(struct i2c_client * client, 354 s32 i2c_smbus_process_call(struct i2c_client *client,
608 u8 command, u16 value); 355 u8 command, u16 value);
609 extern s32 i2c_smbus_process_call(struct i2c_client *client, 356 s32 i2c_smbus_read_block_data(struct i2c_client *client,
610 u8 command, u16 value); 357 u8 command, u8 *values);
611 extern s32 i2c_smbus_read_block_data(struct i2c_client * client, 358 s32 i2c_smbus_write_block_data(struct i2c_client *client,
612 u8 command, u8 *values); 359 u8 command, u8 length, const u8 *values);
613 extern s32 i2c_smbus_write_block_data(struct i2c_client * client, 360 s32 i2c_smbus_read_i2c_block_data(struct i2c_client *client,
614 u8 command, u8 length, 361 u8 command, u8 length, u8 *values);
615 u8 *values); 362 s32 i2c_smbus_write_i2c_block_data(struct i2c_client *client,
616 extern s32 i2c_smbus_read_i2c_block_data(struct i2c_client * client, 363 u8 command, u8 length,
617 u8 command, u8 length, u8 *values); 364 const u8 *values);
618 extern s32 i2c_smbus_write_i2c_block_data(struct i2c_client * client,
619 u8 command, u8 length,
620 u8 *values);
621 365
622These ones were removed from i2c-core because they had no users, but could 366These ones were removed from i2c-core because they had no users, but could
623be added back later if needed: 367be added back later if needed:
624 368
625 extern s32 i2c_smbus_write_quick(struct i2c_client * client, u8 value); 369 s32 i2c_smbus_write_quick(struct i2c_client *client, u8 value);
626 extern s32 i2c_smbus_block_process_call(struct i2c_client *client, 370 s32 i2c_smbus_block_process_call(struct i2c_client *client,
627 u8 command, u8 length, 371 u8 command, u8 length, u8 *values);
628 u8 *values)
629 372
630All these transactions return a negative errno value on failure. The 'write' 373All these transactions return a negative errno value on failure. The 'write'
631transactions return 0 on success; the 'read' transactions return the read 374transactions return 0 on success; the 'read' transactions return the read
@@ -642,7 +385,5 @@ General purpose routines
642Below all general purpose routines are listed, that were not mentioned 385Below all general purpose routines are listed, that were not mentioned
643before. 386before.
644 387
645 /* This call returns a unique low identifier for each registered adapter. 388 /* Return the adapter number for a specific adapter */
646 */ 389 int i2c_adapter_id(struct i2c_adapter *adap);
647 extern int i2c_adapter_id(struct i2c_adapter *adap);
648
diff --git a/Documentation/ia64/xen.txt b/Documentation/ia64/xen.txt
new file mode 100644
index 000000000000..c61a99f7c8bb
--- /dev/null
+++ b/Documentation/ia64/xen.txt
@@ -0,0 +1,183 @@
1 Recipe for getting/building/running Xen/ia64 with pv_ops
2 --------------------------------------------------------
3
4This recipe describes how to get xen-ia64 source and build it,
5and run domU with pv_ops.
6
7============
8Requirements
9============
10
11 - python
12 - mercurial
13 it (aka "hg") is an open-source source code
 14 management software. See below.
15 http://www.selenic.com/mercurial/wiki/
16 - git
17 - bridge-utils
18
19=================================
20Getting and Building Xen and Dom0
21=================================
22
23 My environment is;
24 Machine : Tiger4
25 Domain0 OS : RHEL5
26 DomainU OS : RHEL5
27
28 1. Download source
29 # hg clone http://xenbits.xensource.com/ext/ia64/xen-unstable.hg
30 # cd xen-unstable.hg
31 # hg clone http://xenbits.xensource.com/ext/ia64/linux-2.6.18-xen.hg
32
33 2. # make world
34
35 3. # make install-tools
36
37 4. copy kernels and xen
38 # cp xen/xen.gz /boot/efi/efi/redhat/
39 # cp build-linux-2.6.18-xen_ia64/vmlinux.gz \
40 /boot/efi/efi/redhat/vmlinuz-2.6.18.8-xen
41
42 5. make initrd for Dom0/DomU
43 # make -C linux-2.6.18-xen.hg ARCH=ia64 modules_install \
44 O=$(/bin/pwd)/build-linux-2.6.18-xen_ia64
45 # mkinitrd -f /boot/efi/efi/redhat/initrd-2.6.18.8-xen.img \
46 2.6.18.8-xen --builtin mptspi --builtin mptbase \
47 --builtin mptscsih --builtin uhci-hcd --builtin ohci-hcd \
48 --builtin ehci-hcd
49
50================================
51Making a disk image for guest OS
52================================
53
54 1. make file
55 # dd if=/dev/zero of=/root/rhel5.img bs=1M seek=4096 count=0
56 # mke2fs -F -j /root/rhel5.img
57 # mount -o loop /root/rhel5.img /mnt
58 # cp -ax /{dev,var,etc,usr,bin,sbin,lib} /mnt
59 # mkdir /mnt/{root,proc,sys,home,tmp}
60
61 Note: You may miss some device files. If so, please create them
62 with mknod. Or you can use tar instead of cp.
63
64 2. modify DomU's fstab
65 # vi /mnt/etc/fstab
66 /dev/xvda1 / ext3 defaults 1 1
67 none /dev/pts devpts gid=5,mode=620 0 0
68 none /dev/shm tmpfs defaults 0 0
69 none /proc proc defaults 0 0
70 none /sys sysfs defaults 0 0
71
72 3. modify inittab
73 set runlevel to 3 to avoid X trying to start
74 # vi /mnt/etc/inittab
75 id:3:initdefault:
76 Start a getty on the hvc0 console
77 X0:2345:respawn:/sbin/mingetty hvc0
78 tty1-6 mingetty can be commented out
79
80 4. add hvc0 into /etc/securetty
81 # vi /mnt/etc/securetty (add hvc0)
82
83 5. umount
84 # umount /mnt
85
86FYI, virt-manager can also make a disk image for guest OS.
 87It is a GUI tool that makes this easy.
88
89==================
90Boot Xen & Domain0
91==================
92
93 1. replace elilo
94 elilo of RHEL5 can boot Xen and Dom0.
 95 If you use an old elilo (e.g. RHEL4), please download it from the link below
96 http://elilo.sourceforge.net/cgi-bin/blosxom
97 and copy into /boot/efi/efi/redhat/
98 # cp elilo-3.6-ia64.efi /boot/efi/efi/redhat/elilo.efi
99
100 2. modify elilo.conf (like the below)
101 # vi /boot/efi/efi/redhat/elilo.conf
102 prompt
103 timeout=20
104 default=xen
105 relocatable
106
107 image=vmlinuz-2.6.18.8-xen
108 label=xen
109 vmm=xen.gz
110 initrd=initrd-2.6.18.8-xen.img
111 read-only
112 append=" -- rhgb root=/dev/sda2"
113
114The append options before "--" are for xen hypervisor,
115the options after "--" are for dom0.
116
117FYI, your machine may need console options like
118"com1=19200,8n1 console=vga,com1". For example,
119append="com1=19200,8n1 console=vga,com1 -- rhgb console=tty0 \
120console=ttyS0 root=/dev/sda2"
121
122=====================================
123Getting and Building domU with pv_ops
124=====================================
125
126 1. get pv_ops tree
127 # git clone http://people.valinux.co.jp/~yamahata/xen-ia64/linux-2.6-xen-ia64.git/
128
129 2. git branch (if necessary)
130 # cd linux-2.6-xen-ia64/
131 # git checkout -b your_branch origin/xen-ia64-domu-minimal-2008may19
132 (Note: The current branch is xen-ia64-domu-minimal-2008may19.
133 But you would find the new branch. You can see with
134 "git branch -r" to get the branch lists.
135 http://people.valinux.co.jp/~yamahata/xen-ia64/for_eagl/linux-2.6-ia64-pv-ops.git/
136 is also available. The tree is based on
137 git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6 test)
138
139
140 3. copy .config for pv_ops of domU
141 # cp arch/ia64/configs/xen_domu_wip_defconfig .config
142
143 4. make kernel with pv_ops
144 # make oldconfig
145 # make
146
147 5. install the kernel and initrd
148 # cp vmlinux.gz /boot/efi/efi/redhat/vmlinuz-2.6-pv_ops-xenU
149 # make modules_install
150 # mkinitrd -f /boot/efi/efi/redhat/initrd-2.6-pv_ops-xenU.img \
151 2.6.26-rc3xen-ia64-08941-g1b12161 --builtin mptspi \
152 --builtin mptbase --builtin mptscsih --builtin uhci-hcd \
153 --builtin ohci-hcd --builtin ehci-hcd
154
155========================
156Boot DomainU with pv_ops
157========================
158
159 1. make config of DomU
160 # vi /etc/xen/rhel5
161 kernel = "/boot/efi/efi/redhat/vmlinuz-2.6-pv_ops-xenU"
162 ramdisk = "/boot/efi/efi/redhat/initrd-2.6-pv_ops-xenU.img"
163 vcpus = 1
164 memory = 512
165 name = "rhel5"
166 disk = [ 'file:/root/rhel5.img,xvda1,w' ]
167 root = "/dev/xvda1 ro"
168 extra= "rhgb console=hvc0"
169
170 2. After boot xen and dom0, start xend
171 # /etc/init.d/xend start
172 ( In the debugging case, # XEND_DEBUG=1 xend trace_start )
173
174 3. start domU
175 # xm create -c rhel5
176
177=========
178Reference
179=========
180- Wiki of Xen/IA64 upstream merge
181 http://wiki.xensource.com/xenwiki/XenIA64/UpstreamMerge
182
183Written by Akio Takebe <takebe_akio@jp.fujitsu.com> on 28 May 2008
diff --git a/Documentation/kdump/kdump.txt b/Documentation/kdump/kdump.txt
index 0705040531a5..3f4bc840da8b 100644
--- a/Documentation/kdump/kdump.txt
+++ b/Documentation/kdump/kdump.txt
@@ -109,7 +109,8 @@ There are two possible methods of using Kdump.
1092) Or use the system kernel binary itself as dump-capture kernel and there is 1092) Or use the system kernel binary itself as dump-capture kernel and there is
110 no need to build a separate dump-capture kernel. This is possible 110 no need to build a separate dump-capture kernel. This is possible
 111 only with the architectures which support a relocatable kernel. As 111 only with the architectures which support a relocatable
112 of today, i386, x86_64 and ia64 architectures support relocatable kernel. 112 of today, i386, x86_64, ppc64 and ia64 architectures support relocatable
113 kernel.
113 114
114Building a relocatable kernel is advantageous from the point of view that 115Building a relocatable kernel is advantageous from the point of view that
115one does not have to build a second kernel for capturing the dump. But 116one does not have to build a second kernel for capturing the dump. But
@@ -207,8 +208,15 @@ Dump-capture kernel config options (Arch Dependent, i386 and x86_64)
207Dump-capture kernel config options (Arch Dependent, ppc64) 208Dump-capture kernel config options (Arch Dependent, ppc64)
208---------------------------------------------------------- 209----------------------------------------------------------
209 210
210* Make and install the kernel and its modules. DO NOT add this kernel 2111) Enable "Build a kdump crash kernel" support under "Kernel" options:
211 to the boot loader configuration files. 212
213 CONFIG_CRASH_DUMP=y
214
2152) Enable "Build a relocatable kernel" support
216
217 CONFIG_RELOCATABLE=y
218
219 Make and install the kernel and its modules.
212 220
213Dump-capture kernel config options (Arch Dependent, ia64) 221Dump-capture kernel config options (Arch Dependent, ia64)
214---------------------------------------------------------- 222----------------------------------------------------------
diff --git a/Documentation/powerpc/booting-without-of.txt b/Documentation/powerpc/booting-without-of.txt
index de4063cb4fdc..02ea9a971b8e 100644
--- a/Documentation/powerpc/booting-without-of.txt
+++ b/Documentation/powerpc/booting-without-of.txt
@@ -1917,6 +1917,8 @@ platforms are moved over to use the flattened-device-tree model.
1917 inverse clock polarity (CPOL) mode 1917 inverse clock polarity (CPOL) mode
1918 - spi-cpha - (optional) Empty property indicating device requires 1918 - spi-cpha - (optional) Empty property indicating device requires
1919 shifted clock phase (CPHA) mode 1919 shifted clock phase (CPHA) mode
1920 - spi-cs-high - (optional) Empty property indicating device requires
1921 chip select active high
1920 1922
1921 SPI example for an MPC5200 SPI bus: 1923 SPI example for an MPC5200 SPI bus:
1922 spi@f00 { 1924 spi@f00 {
diff --git a/Documentation/powerpc/dts-bindings/fsl/board.txt b/Documentation/powerpc/dts-bindings/fsl/board.txt
index 74ae6f1cd2d6..81a917ef96e9 100644
--- a/Documentation/powerpc/dts-bindings/fsl/board.txt
+++ b/Documentation/powerpc/dts-bindings/fsl/board.txt
@@ -2,13 +2,13 @@
2 2
3Required properties: 3Required properties:
4 4
5 - device_type : Should be "board-control" 5 - compatible : Should be "fsl,<board>-bcsr"
6 - reg : Offset and length of the register set for the device 6 - reg : Offset and length of the register set for the device
7 7
8Example: 8Example:
9 9
10 bcsr@f8000000 { 10 bcsr@f8000000 {
11 device_type = "board-control"; 11 compatible = "fsl,mpc8360mds-bcsr";
12 reg = <f8000000 8000>; 12 reg = <f8000000 8000>;
13 }; 13 };
14 14
diff --git a/Documentation/usb/WUSB-Design-overview.txt b/Documentation/usb/WUSB-Design-overview.txt
new file mode 100644
index 000000000000..4c3d62c7843a
--- /dev/null
+++ b/Documentation/usb/WUSB-Design-overview.txt
@@ -0,0 +1,448 @@
1
2Linux UWB + Wireless USB + WiNET
3
4 (C) 2005-2006 Intel Corporation
5 Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
6
7 This program is free software; you can redistribute it and/or
8 modify it under the terms of the GNU General Public License version
9 2 as published by the Free Software Foundation.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 02110-1301, USA.
20
21
22Please visit http://bughost.org/thewiki/Design-overview.txt-1.8 for
23updated content.
24
25 * Design-overview.txt-1.8
26
27This code implements a Ultra Wide Band stack for Linux, as well as
 28drivers for the USB based UWB radio controllers defined in the
29Wireless USB 1.0 specification (including Wireless USB host controller
30and an Intel WiNET controller).
31
32 1. Introduction
33 1. HWA: Host Wire adapters, your Wireless USB dongle
34
35 2. DWA: Device Wired Adaptor, a Wireless USB hub for wired
36 devices
37 3. WHCI: Wireless Host Controller Interface, the PCI WUSB host
38 adapter
39 2. The UWB stack
40 1. Devices and hosts: the basic structure
41
42 2. Host Controller life cycle
43
44 3. On the air: beacons and enumerating the radio neighborhood
45
46 4. Device lists
47 5. Bandwidth allocation
48
49 3. Wireless USB Host Controller drivers
50
51 4. Glossary
52
53
54 Introduction
55
56UWB is a wide-band communication protocol that is to serve also as the
57low-level protocol for others (much like TCP sits on IP). Currently
58these others are Wireless USB and TCP/IP, but seems Bluetooth and
59Firewire/1394 are coming along.
60
61UWB uses a band from roughly 3 to 10 GHz, transmitting at a max of
62~-41dB (or 0.074 uW/MHz--geography specific data is still being
63negotiated w/ regulators, so watch for changes). That band is divided in
64a bunch of ~1.5 GHz wide channels (or band groups) composed of three
65subbands/subchannels (528 MHz each). Each channel is independent of each
66other, so you could consider them different "busses". Initially this
67driver considers them all a single one.
68
69Radio time is divided in 65536 us long /superframes/, each one divided
70in 256 256us long /MASs/ (Media Allocation Slots), which are the basic
71time/media allocation units for transferring data. At the beginning of
72each superframe there is a Beacon Period (BP), where every device
 73transmits its beacon on a single MAS. The length of the BP depends on how
74many devices are present and the length of their beacons.
75
76Devices have a MAC (fixed, 48 bit address) and a device (changeable, 16
77bit address) and send periodic beacons to advertise themselves and pass
78info on what they are and do. They advertise their capabilities and a
79bunch of other stuff.
80
81The different logical parts of this driver are:
82
83 *
84
85 *UWB*: the Ultra-Wide-Band stack -- manages the radio and
86 associated spectrum to allow for devices sharing it. Allows to
 87 control bandwidth assignment, beaconing, scanning, etc
88
89 *
90
91 *WUSB*: the layer that sits on top of UWB to provide Wireless USB.
92 The Wireless USB spec defines means to control a UWB radio and to
93 do the actual WUSB.
94
95
96 HWA: Host Wire adapters, your Wireless USB dongle
97
98WUSB also defines a device called a Host Wire Adaptor (HWA), which in
99mere terms is a USB dongle that enables your PC to have UWB and Wireless
100USB. The Wireless USB Host Controller in a HWA looks to the host like a
101[Wireless] USB controller connected via USB (!)
102
103The HWA itself is broken in two or three main interfaces:
104
105 *
106
107 *RC*: Radio control -- this implements an interface to the
108 Ultra-Wide-Band radio controller. The driver for this implements a
109 USB-based UWB Radio Controller to the UWB stack.
110
111 *
112
113 *HC*: the wireless USB host controller. It looks like a USB host
114 whose root port is the radio and the WUSB devices connect to it.
115 To the system it looks like a separate USB host. The driver (will)
116 implement a USB host controller (similar to UHCI, OHCI or EHCI)
117 for which the root hub is the radio...To reiterate: it is a USB
118 controller that is connected via USB instead of PCI.
119
120 *
121
122 *WINET*: some HW provide a WiNET interface (IP over UWB). This
123 package provides a driver for it (it looks like a network
124 interface, winetX). The driver detects when there is a link up for
 125 their type and kicks into gear.
126
127
128 DWA: Device Wired Adaptor, a Wireless USB hub for wired devices
129
130These are the complement to HWAs. They are a USB host for connecting
131wired devices, but it is connected to your PC connected via Wireless
132USB. To the system it looks like yet another USB host. To the untrained
133eye, it looks like a hub that connects upstream wirelessly.
134
135We still offer no support for this; however, it should share a lot of
136code with the HWA-RC driver; there is a bunch of factorization work that
137has been done to support that in upcoming releases.
138
139
140 WHCI: Wireless Host Controller Interface, the PCI WUSB host adapter
141
142This is your usual PCI device that implements WHCI. Similar in concept
143to EHCI, it allows your wireless USB devices (including DWAs) to connect
144to your host via a PCI interface. As in the case of the HWA, it has a
145Radio Control interface and the WUSB Host Controller interface per se.
146
147There is still no driver support for this, but will be in upcoming
148releases.
149
150
151 The UWB stack
152
153The main mission of the UWB stack is to keep a tally of which devices
154are in radio proximity to allow drivers to connect to them. As well, it
155provides an API for controlling the local radio controllers (RCs from
156now on), such as to start/stop beaconing, scan, allocate bandwidth, etc.
157
158
159 Devices and hosts: the basic structure
160
161The main building block here is the UWB device (struct uwb_dev). For
162each device that pops up in radio presence (ie: the UWB host receives a
163beacon from it) you get a struct uwb_dev that will show up in
164/sys/class/uwb and in /sys/bus/uwb/devices.
165
166For each RC that is detected, a new struct uwb_rc is created. In turn, a
167RC is also a device, so they also show in /sys/class/uwb and
168/sys/bus/uwb/devices, but at the same time, only radio controllers show
169up in /sys/class/uwb_rc.
170
171 *
172
173 [*] The reason for RCs being also devices is that not only we can
174 see them while enumerating the system device tree, but also on the
175 radio (their beacons and stuff), so the handling has to be
176 likewise to that of a device.
177
178Each RC driver is implemented by a separate driver that plugs into the
179interface that the UWB stack provides through a struct uwb_rc_ops. The
180spec creators have been nice enough to make the message format the same
181for HWA and WHCI RCs, so the driver is really a very thin transport that
182moves the requests from the UWB API to the device [/uwb_rc_ops->cmd()/]
183and sends the replies and notifications back to the API
184[/uwb_rc_neh_grok()/]. Notifications are handled to the UWB daemon, that
185is chartered, among other things, to keep the tab of how the UWB radio
186neighborhood looks, creating and destroying devices as they show up or
 187disappear.
188
189Command execution is very simple: a command block is sent and a event
190block or reply is expected back. For sending/receiving command/events, a
191handle called /neh/ (Notification/Event Handle) is opened with
192/uwb_rc_neh_open()/.
193
194The HWA-RC (USB dongle) driver (drivers/uwb/hwa-rc.c) does this job for
195the USB connected HWA. Eventually, drivers/whci-rc.c will do the same
196for the PCI connected WHCI controller.
197
198
199 Host Controller life cycle
200
201So let's say we connect a dongle to the system: it is detected and
202firmware uploaded if needed [for Intel's i1480
203/drivers/uwb/ptc/usb.c:ptc_usb_probe()/] and then it is reenumerated.
204Now we have a real HWA device connected and
205/drivers/uwb/hwa-rc.c:hwarc_probe()/ picks it up, that will set up the
206Wire-Adaptor environment and then suck it into the UWB stack's vision of
207the world [/drivers/uwb/lc-rc.c:uwb_rc_add()/].
208
209 *
210
211 [*] The stack should put a new RC to scan for devices
212 [/uwb_rc_scan()/] so it finds what's available around and tries to
213 connect to them, but this is policy stuff and should be driven
214 from user space. As of now, the operator is expected to do it
215 manually; see the release notes for documentation on the procedure.
216
217When a dongle is disconnected, /drivers/uwb/hwa-rc.c:hwarc_disconnect()/
 218takes care of tearing everything down safely (or not...).
219
220
221 On the air: beacons and enumerating the radio neighborhood
222
223So assuming we have devices and we have agreed for a channel to connect
224on (let's say 9), we put the new RC to beacon:
225
226 *
227
228 $ echo 9 0 > /sys/class/uwb_rc/uwb0/beacon
229
230Now it is visible. If there were other devices in the same radio channel
231and beacon group (that's what the zero is for), the dongle's radio
232control interface will send beacon notifications on its
233notification/event endpoint (NEEP). The beacon notifications are part of
234the event stream that is funneled into the API with
235/drivers/uwb/neh.c:uwb_rc_neh_grok()/ and delivered to the UWBD, the UWB
236daemon through a notification list.
237
238UWBD wakes up and scans the event list; finds a beacon and adds it to
239the BEACON CACHE (/uwb_beca/). If he receives a number of beacons from
240the same device, he considers it to be 'onair' and creates a new device
241[/drivers/uwb/lc-dev.c:uwbd_dev_onair()/]. Similarly, when no beacons
242are received in some time, the device is considered gone and wiped out
243[uwbd calls periodically /uwb/beacon.c:uwb_beca_purge()/ that will purge
244the beacon cache of dead devices].
245
246
247 Device lists
248
249All UWB devices are kept in the list of the struct bus_type uwb_bus.
250
251
252 Bandwidth allocation
253
254The UWB stack maintains a local copy of DRP availability through
255processing of incoming *DRP Availability Change* notifications. This
256local copy is currently used to present the current bandwidth
257availability to the user through the sysfs file
258/sys/class/uwb_rc/uwbx/bw_avail. In the future the bandwidth
259availability information will be used by the bandwidth reservation
260routines.
261
262The bandwidth reservation routines are in progress and are thus not
263present in the current release. When completed they will enable a user
264to initiate DRP reservation requests through interaction with sysfs. DRP
265reservation requests from remote UWB devices will also be handled. The
266bandwidth management done by the UWB stack will include callbacks to the
 267higher layers that will enable the higher layers to use the reservations upon
268completion. [Note: The bandwidth reservation work is in progress and
269subject to change.]
270
271
272 Wireless USB Host Controller drivers
273
274*WARNING* This section needs a lot of work!
275
276As explained above, there are three different types of HCs in the WUSB
277world: HWA-HC, DWA-HC and WHCI-HC.
278
279HWA-HC and DWA-HC share that they are Wire-Adapters (USB or WUSB
280connected controllers), and their transfer management system is almost
281identical. So is their notification delivery system.
282
283HWA-HC and WHCI-HC share that they are both WUSB host controllers, so
284they have to deal with WUSB device life cycle and maintenance, wireless
285root-hub
286
287HWA exposes a Host Controller interface (HWA-HC 0xe0/02/02). This has
288three endpoints (Notifications, Data Transfer In and Data Transfer
289Out--known as NEP, DTI and DTO in the code).
290
291We reserve UWB bandwidth for our Wireless USB Cluster, create a Cluster
292ID and tell the HC to use all that. Then we start it. This means the HC
293starts sending MMCs.
294
295 *
296
297 The MMCs are blocks of data defined somewhere in the WUSB1.0 spec
298 that define a stream in the UWB channel time allocated for sending
299 WUSB IEs (host to device commands/notifications) and Device
300 Notifications (device initiated to host). Each host defines a
301 unique Wireless USB cluster through MMCs. Devices can connect to a
302 single cluster at the time. The IEs are Information Elements, and
303 among them are the bandwidth allocations that tell each device
304 when can they transmit or receive.
305
306Now it all depends on external stimuli.
307
308*New device connection*
309
310A new device pops up, it scans the radio looking for MMCs that give out
311the existence of Wireless USB channels. Once one (or more) are found,
312selects which one to connect to. Sends a /DN_Connect/ (device
313notification connect) during the DNTS (Device Notification Time
314Slot--announced in the MMCs
315
316HC picks the /DN_Connect/ out (nep module sends to notif.c for delivery
317into /devconnect/). This process starts the authentication process for
318the device. First we allocate a /fake port/ and assign an
319unauthenticated address (128 to 255--what we really do is
3200x80 | fake_port_idx). We fiddle with the fake port status and /khubd/
321sees a new connection, so he moves on to enable the fake port with a reset.
322
323So now we are in the reset path -- we know we have a non-yet enumerated
324device with an unauthorized address; we ask user space to authenticate
325(FIXME: not yet done, similar to bluetooth pairing), then we do the key
326exchange (FIXME: not yet done) and issue a /set address 0/ to bring the
327device to the default state. Device is authenticated.
328
329From here, the USB stack takes control through the usb_hcd ops. khubd
330has seen the port status changes, as we have been toggling them. It will
331start enumerating and doing transfers through usb_hcd->urb_enqueue() to
332read descriptors and move our data.
333
334*Device life cycle and keep alives*
335
 336Every time there is a successful transfer to/from a device, we update a
337per-device activity timestamp. If not, every now and then we check and
338if the activity timestamp gets old, we ping the device by sending it a
339Keep Alive IE; it responds with a /DN_Alive/ pong during the DNTS (this
340arrives to us as a notification through
341devconnect.c:wusb_handle_dn_alive(). If a device times out, we
342disconnect it from the system (cleaning up internal information and
343toggling the bits in the fake hub port, which kicks khubd into removing
344the rest of the stuff).
345
346This is done through devconnect:__wusb_check_devs(), which will scan the
347device list looking for whom needs refreshing.
348
349If the device wants to disconnect, it will either die (ugly) or send a
350/DN_Disconnect/ that will prompt a disconnection from the system.
351
352*Sending and receiving data*
353
354Data is sent and received through /Remote Pipes/ (rpipes). An rpipe is
355/aimed/ at an endpoint in a WUSB device. This is the same for HWAs and
356DWAs.
357
358Each HC has a number of rpipes and buffers that can be assigned to them;
359when doing a data transfer (xfer), first the rpipe has to be aimed and
360prepared (buffers assigned), then we can start queueing requests for
361data in or out.
362
363Data buffers have to be segmented out before sending--so we send first a
364header (segment request) and then if there is any data, a data buffer
365immediately after to the DTI interface (yep, even the request). If our
366buffer is bigger than the max segment size, then we just do multiple
367requests.
368
 369[This sucks, because doing USB scatter gather in Linux is resource
370intensive, if any...not that the current approach is not. It just has to
371be cleaned up a lot :)].
372
373If reading, we don't send data buffers, just the segment headers saying
374we want to read segments.
375
376When the xfer is executed, we receive a notification that says data is
377ready in the DTI endpoint (handled through
378xfer.c:wa_handle_notif_xfer()). In there we read from the DTI endpoint a
379descriptor that gives us the status of the transfer, its identification
380(given when we issued it) and the segment number. If it was a data read,
381we issue another URB to read into the destination buffer the chunk of
382data coming out of the remote endpoint. Done, wait for the next guy. The
383callbacks for the URBs issued from here are the ones that will declare
 384the xfer complete at some point and call its callback.
385
386Seems simple, but the implementation is not trivial.
387
388 *
389
390 *WARNING* Old!!
391
392The main xfer descriptor, wa_xfer (equivalent to a URB) contains an
393array of segments, tallys on segments and buffers and callback
394information. Buried in there is a lot of URBs for executing the segments
395and buffer transfers.
396
397For OUT xfers, there is an array of segments, one URB for each, another
398one of buffer URB. When submitting, we submit URBs for segment request
3991, buffer 1, segment 2, buffer 2...etc. Then we wait on the DTI for xfer
400result data; when all the segments are complete, we call the callback to
401finalize the transfer.
402
403For IN xfers, we only issue URBs for the segments we want to read and
404then wait for the xfer result data.
405
406*URB mapping into xfers*
407
408This is done by hwahc_op_urb_[en|de]queue(). In enqueue() we aim an
409rpipe to the endpoint where we have to transmit, create a transfer
410context (wa_xfer) and submit it. When the xfer is done, our callback is
411called and we assign the status bits and release the xfer resources.
412
413In dequeue() we are basically cancelling/aborting the transfer. We issue
 414a xfer abort request to the HC, cancel all the URBs we had submitted
415and not yet done and when all that is done, the xfer callback will be
416called--this will call the URB callback.
417
418
419 Glossary
420
421*DWA* -- Device Wire Adapter
422
423USB host, wired for downstream devices, upstream connects wirelessly
424with Wireless USB.
425
426*EVENT* -- Response to a command on the NEEP
427
428*HWA* -- Host Wire Adapter / USB dongle for UWB and Wireless USB
429
430*NEH* -- Notification/Event Handle
431
432Handle/file descriptor for receiving notifications or events. The WA
433code requires you to get one of this to listen for notifications or
434events on the NEEP.
435
436*NEEP* -- Notification/Event EndPoint
437
438Stuff related to the management of the first endpoint of a HWA USB
 439dongle that is used to deliver a stream of events and notifications to
440the host.
441
442*NOTIFICATION* -- Message coming in the NEEP as response to something.
443
444*RC* -- Radio Control
445
446Design-overview.txt-1.8 (last edited 2006-11-04 12:22:24 by
447InakyPerezGonzalez)
448
diff --git a/Documentation/usb/wusb-cbaf b/Documentation/usb/wusb-cbaf
new file mode 100644
index 000000000000..2e78b70f3adc
--- /dev/null
+++ b/Documentation/usb/wusb-cbaf
@@ -0,0 +1,139 @@
1#! /bin/bash
2#
3
4set -e
5
6progname=$(basename $0)
7function help
8{
9 cat <<EOF
10Usage: $progname COMMAND DEVICEs [ARGS]
11
12Command for manipulating the pairing/authentication credentials of a
13Wireless USB device that supports wired-mode Cable-Based-Association.
14
15Works in conjunction with the wusb-cba.ko driver from http://linuxuwb.org.
16
17
18DEVICE
19
 20 sysfs path to the device to authenticate; for example, both of
 21 these paths refer to the same device:
22
23 /sys/devices/pci0000:00/0000:00:1d.7/usb1/1-4/1-4.4/1-4.4:1.1
24 /sys/bus/usb/drivers/wusb-cbaf/1-4.4:1.1
25
26COMMAND/ARGS are
27
28 start
29
30 Start a WUSB host controller (by setting up a CHID)
31
32 set-chid DEVICE HOST-CHID HOST-BANDGROUP HOST-NAME
33
34 Sets host information in the device; after this you can call the
35 get-cdid to see how does this device report itself to us.
36
37 get-cdid DEVICE
38
 39 Get the device ID associated to the HOST-CHID we sent with
40 'set-chid'. We might not know about it.
41
42 set-cc DEVICE
43
44 If we allow the device to connect, set a random new CDID and CK
45 (connection key). Device saves them for the next time it wants to
46 connect wireless. We save them for that next time also so we can
47 authenticate the device (when we see the CDID he uses to id
48 itself) and the CK to crypto talk to it.
49
50CHID is always 16 hex bytes in 'XX YY ZZ...' form
51BANDGROUP is almost always 0001
52
53Examples:
54
55 You can default most arguments to '' to get a sane value:
56
57 $ $progname set-chid '' '' '' "My host name"
58
59 A full sequence:
60
61 $ $progname set-chid '' '' '' "My host name"
62 $ $progname get-cdid ''
63 $ $progname set-cc ''
64
65EOF
66}
67
68
69# Defaults
70# FIXME: CHID should come from a database :), band group from the host
71host_CHID="00 11 22 33 44 55 66 77 88 99 aa bb cc dd ee ff"
72host_band_group="0001"
73host_name=$(hostname)
74
75devs="$(echo /sys/bus/usb/drivers/wusb-cbaf/[0-9]*)"
76hdevs="$(for h in /sys/class/uwb_rc/*/wusbhc; do readlink -f $h; done)"
77
78result=0
79case $1 in
80 start)
81 for dev in ${2:-$hdevs}
82 do
83 uwb_rc=$(readlink -f $dev/uwb_rc)
84 if cat $uwb_rc/beacon | grep -q -- "-1"
85 then
86 echo 13 0 > $uwb_rc/beacon
87 echo I: started beaconing on ch 13 on $(basename $uwb_rc) >&2
88 fi
89 echo $host_CHID > $dev/wusb_chid
90 echo I: started host $(basename $dev) >&2
91 done
92 ;;
93 stop)
94 for dev in ${2:-$hdevs}
95 do
96 echo 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 > $dev/wusb_chid
97 echo I: stopped host $(basename $dev) >&2
98 uwb_rc=$(readlink -f $dev/uwb_rc)
99 echo -1 | cat > $uwb_rc/beacon
100 echo I: stopped beaconing on $(basename $uwb_rc) >&2
101 done
102 ;;
103 set-chid)
104 shift
105 for dev in ${2:-$devs}; do
106 echo "${4:-$host_name}" > $dev/wusb_host_name
107 echo "${3:-$host_band_group}" > $dev/wusb_host_band_groups
108 echo ${2:-$host_CHID} > $dev/wusb_chid
109 done
110 ;;
111 get-cdid)
112 for dev in ${2:-$devs}
113 do
114 cat $dev/wusb_cdid
115 done
116 ;;
117 set-cc)
118 for dev in ${2:-$devs}; do
119 shift
120 CDID="$(head --bytes=16 /dev/urandom | od -tx1 -An)"
121 CK="$(head --bytes=16 /dev/urandom | od -tx1 -An)"
122 echo "$CDID" > $dev/wusb_cdid
123 echo "$CK" > $dev/wusb_ck
124
125 echo I: CC set >&2
126 echo "CHID: $(cat $dev/wusb_chid)"
127 echo "CDID:$CDID"
128 echo "CK: $CK"
129 done
130 ;;
131 help|h|--help|-h)
132 help
133 ;;
134 *)
135 echo "E: Unknown usage" 1>&2
136 help 1>&2
137 result=1
138esac
139exit $result
diff --git a/MAINTAINERS b/MAINTAINERS
index 5c3f79c26384..67fa3cff1749 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1053,6 +1053,12 @@ L: cbe-oss-dev@ozlabs.org
1053W: http://www.ibm.com/developerworks/power/cell/ 1053W: http://www.ibm.com/developerworks/power/cell/
1054S: Supported 1054S: Supported
1055 1055
1056CERTIFIED WIRELESS USB (WUSB) SUBSYSTEM:
1057P: David Vrabel
1058M: david.vrabel@csr.com
1059L: linux-usb@vger.kernel.org
1060S: Supported
1061
1056CFAG12864B LCD DRIVER 1062CFAG12864B LCD DRIVER
1057P: Miguel Ojeda Sandonis 1063P: Miguel Ojeda Sandonis
1058M: miguel.ojeda.sandonis@gmail.com 1064M: miguel.ojeda.sandonis@gmail.com
@@ -2176,6 +2182,13 @@ M: maciej.sosnowski@intel.com
2176L: linux-kernel@vger.kernel.org 2182L: linux-kernel@vger.kernel.org
2177S: Supported 2183S: Supported
2178 2184
2185INTEL IOMMU (VT-d)
2186P: David Woodhouse
2187M: dwmw2@infradead.org
2188L: iommu@lists.linux-foundation.org
2189T: git://git.infradead.org/iommu-2.6.git
2190S: Supported
2191
2179INTEL IOP-ADMA DMA DRIVER 2192INTEL IOP-ADMA DMA DRIVER
2180P: Dan Williams 2193P: Dan Williams
2181M: dan.j.williams@intel.com 2194M: dan.j.williams@intel.com
@@ -2928,9 +2941,9 @@ S: Maintained
2928 2941
2929NETEFFECT IWARP RNIC DRIVER (IW_NES) 2942NETEFFECT IWARP RNIC DRIVER (IW_NES)
2930P: Faisal Latif 2943P: Faisal Latif
2931M: flatif@neteffect.com 2944M: faisal.latif@intel.com
2932P: Chien Tung 2945P: Chien Tung
2933M: ctung@neteffect.com 2946M: chien.tin.tung@intel.com
2934L: general@lists.openfabrics.org 2947L: general@lists.openfabrics.org
2935W: http://www.neteffect.com 2948W: http://www.neteffect.com
2936S: Supported 2949S: Supported
@@ -3244,11 +3257,6 @@ L: linux-pci@vger.kernel.org
3244T: git kernel.org:/pub/scm/linux/kernel/git/jbarnes/pci-2.6.git 3257T: git kernel.org:/pub/scm/linux/kernel/git/jbarnes/pci-2.6.git
3245S: Supported 3258S: Supported
3246 3259
3247PCI HOTPLUG CORE
3248P: Kristen Carlson Accardi
3249M: kristen.c.accardi@intel.com
3250S: Supported
3251
3252PCIE HOTPLUG DRIVER 3260PCIE HOTPLUG DRIVER
3253P: Kristen Carlson Accardi 3261P: Kristen Carlson Accardi
3254M: kristen.c.accardi@intel.com 3262M: kristen.c.accardi@intel.com
@@ -4191,6 +4199,12 @@ L: sparclinux@vger.kernel.org
4191T: git kernel.org:/pub/scm/linux/kernel/git/davem/sparc-2.6.git 4199T: git kernel.org:/pub/scm/linux/kernel/git/davem/sparc-2.6.git
4192S: Maintained 4200S: Maintained
4193 4201
4202ULTRA-WIDEBAND (UWB) SUBSYSTEM:
4203P: David Vrabel
4204M: david.vrabel@csr.com
4205L: linux-usb@vger.kernel.org
4206S: Supported
4207
4194UNIFORM CDROM DRIVER 4208UNIFORM CDROM DRIVER
4195P: Jens Axboe 4209P: Jens Axboe
4196M: axboe@kernel.dk 4210M: axboe@kernel.dk
@@ -4616,6 +4630,11 @@ M: zaga@fly.cc.fer.hr
4616L: linux-scsi@vger.kernel.org 4630L: linux-scsi@vger.kernel.org
4617S: Maintained 4631S: Maintained
4618 4632
4633WIMEDIA LLC PROTOCOL (WLP) SUBSYSTEM
4634P: David Vrabel
4635M: david.vrabel@csr.com
4636S: Maintained
4637
4619WISTRON LAPTOP BUTTON DRIVER 4638WISTRON LAPTOP BUTTON DRIVER
4620P: Miloslav Trmac 4639P: Miloslav Trmac
4621M: mitr@volny.cz 4640M: mitr@volny.cz
diff --git a/arch/alpha/oprofile/common.c b/arch/alpha/oprofile/common.c
index 7c3d5ec6ec67..bd8ac533a504 100644
--- a/arch/alpha/oprofile/common.c
+++ b/arch/alpha/oprofile/common.c
@@ -106,7 +106,7 @@ op_axp_stop(void)
106} 106}
107 107
108static int 108static int
109op_axp_create_files(struct super_block * sb, struct dentry * root) 109op_axp_create_files(struct super_block *sb, struct dentry *root)
110{ 110{
111 int i; 111 int i;
112 112
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index df39d20f7425..f504c801792f 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -356,7 +356,7 @@ config ARCH_IXP4XX
356 select GENERIC_GPIO 356 select GENERIC_GPIO
357 select GENERIC_TIME 357 select GENERIC_TIME
358 select GENERIC_CLOCKEVENTS 358 select GENERIC_CLOCKEVENTS
359 select ZONE_DMA if PCI 359 select DMABOUNCE if PCI
360 help 360 help
361 Support for Intel's IXP4XX (XScale) family of processors. 361 Support for Intel's IXP4XX (XScale) family of processors.
362 362
@@ -1256,6 +1256,8 @@ source "drivers/hid/Kconfig"
1256 1256
1257source "drivers/usb/Kconfig" 1257source "drivers/usb/Kconfig"
1258 1258
1259source "drivers/uwb/Kconfig"
1260
1259source "drivers/mmc/Kconfig" 1261source "drivers/mmc/Kconfig"
1260 1262
1261source "drivers/memstick/Kconfig" 1263source "drivers/memstick/Kconfig"
diff --git a/arch/arm/common/Kconfig b/arch/arm/common/Kconfig
index 2e32acca02fb..86b5e6982660 100644
--- a/arch/arm/common/Kconfig
+++ b/arch/arm/common/Kconfig
@@ -13,10 +13,10 @@ config ICST307
13config SA1111 13config SA1111
14 bool 14 bool
15 select DMABOUNCE if !ARCH_PXA 15 select DMABOUNCE if !ARCH_PXA
16 select ZONE_DMA if !ARCH_PXA
17 16
18config DMABOUNCE 17config DMABOUNCE
19 bool 18 bool
19 select ZONE_DMA
20 20
21config TIMER_ACORN 21config TIMER_ACORN
22 bool 22 bool
diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c
index fb86f248aab8..47ccec95f3e8 100644
--- a/arch/arm/common/sa1111.c
+++ b/arch/arm/common/sa1111.c
@@ -581,6 +581,7 @@ sa1111_init_one_child(struct sa1111 *sachip, struct resource *parent,
581 goto out; 581 goto out;
582 } 582 }
583 583
584#ifdef CONFIG_DMABOUNCE
584 /* 585 /*
585 * If the parent device has a DMA mask associated with it, 586 * If the parent device has a DMA mask associated with it,
586 * propagate it down to the children. 587 * propagate it down to the children.
@@ -598,6 +599,7 @@ sa1111_init_one_child(struct sa1111 *sachip, struct resource *parent,
598 } 599 }
599 } 600 }
600 } 601 }
602#endif
601 603
602out: 604out:
603 return ret; 605 return ret;
@@ -937,7 +939,7 @@ static int sa1111_resume(struct platform_device *dev)
937#define sa1111_resume NULL 939#define sa1111_resume NULL
938#endif 940#endif
939 941
940static int sa1111_probe(struct platform_device *pdev) 942static int __devinit sa1111_probe(struct platform_device *pdev)
941{ 943{
942 struct resource *mem; 944 struct resource *mem;
943 int irq; 945 int irq;
diff --git a/arch/arm/configs/trizeps4_defconfig b/arch/arm/configs/trizeps4_defconfig
index 8b7a431a8bfc..9033d147f052 100644
--- a/arch/arm/configs/trizeps4_defconfig
+++ b/arch/arm/configs/trizeps4_defconfig
@@ -147,6 +147,7 @@ CONFIG_ARCH_PXA=y
147# CONFIG_MACH_MAINSTONE is not set 147# CONFIG_MACH_MAINSTONE is not set
148# CONFIG_ARCH_PXA_IDP is not set 148# CONFIG_ARCH_PXA_IDP is not set
149# CONFIG_PXA_SHARPSL is not set 149# CONFIG_PXA_SHARPSL is not set
150CONFIG_TRIZEPS_PXA=y
150CONFIG_MACH_TRIZEPS4=y 151CONFIG_MACH_TRIZEPS4=y
151CONFIG_MACH_TRIZEPS4_CONXS=y 152CONFIG_MACH_TRIZEPS4_CONXS=y
152# CONFIG_MACH_TRIZEPS4_ANY is not set 153# CONFIG_MACH_TRIZEPS4_ANY is not set
diff --git a/arch/arm/mach-clps711x/include/mach/memory.h b/arch/arm/mach-clps711x/include/mach/memory.h
index 71c2fa70c8e8..98ec30c97bbe 100644
--- a/arch/arm/mach-clps711x/include/mach/memory.h
+++ b/arch/arm/mach-clps711x/include/mach/memory.h
@@ -89,6 +89,8 @@
89 * node 3: 0xd8000000 - 0xdfffffff 89 * node 3: 0xd8000000 - 0xdfffffff
90 */ 90 */
91#define NODE_MEM_SIZE_BITS 24 91#define NODE_MEM_SIZE_BITS 24
92#define SECTION_SIZE_BITS 24
93#define MAX_PHYSMEM_BITS 32
92 94
93#endif 95#endif
94 96
diff --git a/arch/arm/mach-ixp4xx/Kconfig b/arch/arm/mach-ixp4xx/Kconfig
index db8b5fe06c0d..2c5a02b8520e 100644
--- a/arch/arm/mach-ixp4xx/Kconfig
+++ b/arch/arm/mach-ixp4xx/Kconfig
@@ -167,11 +167,6 @@ config MACH_GTWX5715
167 167
168comment "IXP4xx Options" 168comment "IXP4xx Options"
169 169
170config DMABOUNCE
171 bool
172 default y
173 depends on PCI
174
175config IXP4XX_INDIRECT_PCI 170config IXP4XX_INDIRECT_PCI
176 bool "Use indirect PCI memory access" 171 bool "Use indirect PCI memory access"
177 depends on PCI 172 depends on PCI
diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
index 85cad05d8c5b..0bb1fbd84ccb 100644
--- a/arch/arm/mach-kirkwood/common.c
+++ b/arch/arm/mach-kirkwood/common.c
@@ -16,6 +16,7 @@
16#include <linux/mv643xx_eth.h> 16#include <linux/mv643xx_eth.h>
17#include <linux/ata_platform.h> 17#include <linux/ata_platform.h>
18#include <linux/spi/orion_spi.h> 18#include <linux/spi/orion_spi.h>
19#include <net/dsa.h>
19#include <asm/page.h> 20#include <asm/page.h>
20#include <asm/timex.h> 21#include <asm/timex.h>
21#include <asm/mach/map.h> 22#include <asm/mach/map.h>
@@ -152,6 +153,40 @@ void __init kirkwood_ge00_init(struct mv643xx_eth_platform_data *eth_data)
152 153
153 154
154/***************************************************************************** 155/*****************************************************************************
156 * Ethernet switch
157 ****************************************************************************/
158static struct resource kirkwood_switch_resources[] = {
159 {
160 .start = 0,
161 .end = 0,
162 .flags = IORESOURCE_IRQ,
163 },
164};
165
166static struct platform_device kirkwood_switch_device = {
167 .name = "dsa",
168 .id = 0,
169 .num_resources = 0,
170 .resource = kirkwood_switch_resources,
171};
172
173void __init kirkwood_ge00_switch_init(struct dsa_platform_data *d, int irq)
174{
175 if (irq != NO_IRQ) {
176 kirkwood_switch_resources[0].start = irq;
177 kirkwood_switch_resources[0].end = irq;
178 kirkwood_switch_device.num_resources = 1;
179 }
180
181 d->mii_bus = &kirkwood_ge00_shared.dev;
182 d->netdev = &kirkwood_ge00.dev;
183 kirkwood_switch_device.dev.platform_data = d;
184
185 platform_device_register(&kirkwood_switch_device);
186}
187
188
189/*****************************************************************************
155 * SoC RTC 190 * SoC RTC
156 ****************************************************************************/ 191 ****************************************************************************/
157static struct resource kirkwood_rtc_resource = { 192static struct resource kirkwood_rtc_resource = {
diff --git a/arch/arm/mach-kirkwood/common.h b/arch/arm/mach-kirkwood/common.h
index 8fa0f6a27635..5774632a67e3 100644
--- a/arch/arm/mach-kirkwood/common.h
+++ b/arch/arm/mach-kirkwood/common.h
@@ -11,6 +11,7 @@
11#ifndef __ARCH_KIRKWOOD_COMMON_H 11#ifndef __ARCH_KIRKWOOD_COMMON_H
12#define __ARCH_KIRKWOOD_COMMON_H 12#define __ARCH_KIRKWOOD_COMMON_H
13 13
14struct dsa_platform_data;
14struct mv643xx_eth_platform_data; 15struct mv643xx_eth_platform_data;
15struct mv_sata_platform_data; 16struct mv_sata_platform_data;
16 17
@@ -29,6 +30,7 @@ void kirkwood_pcie_id(u32 *dev, u32 *rev);
29 30
30void kirkwood_ehci_init(void); 31void kirkwood_ehci_init(void);
31void kirkwood_ge00_init(struct mv643xx_eth_platform_data *eth_data); 32void kirkwood_ge00_init(struct mv643xx_eth_platform_data *eth_data);
33void kirkwood_ge00_switch_init(struct dsa_platform_data *d, int irq);
32void kirkwood_pcie_init(void); 34void kirkwood_pcie_init(void);
33void kirkwood_rtc_init(void); 35void kirkwood_rtc_init(void);
34void kirkwood_sata_init(struct mv_sata_platform_data *sata_data); 36void kirkwood_sata_init(struct mv_sata_platform_data *sata_data);
diff --git a/arch/arm/mach-kirkwood/rd88f6281-setup.c b/arch/arm/mach-kirkwood/rd88f6281-setup.c
index f785093e433f..175054abd630 100644
--- a/arch/arm/mach-kirkwood/rd88f6281-setup.c
+++ b/arch/arm/mach-kirkwood/rd88f6281-setup.c
@@ -19,6 +19,7 @@
19#include <linux/ata_platform.h> 19#include <linux/ata_platform.h>
20#include <linux/mv643xx_eth.h> 20#include <linux/mv643xx_eth.h>
21#include <linux/ethtool.h> 21#include <linux/ethtool.h>
22#include <net/dsa.h>
22#include <asm/mach-types.h> 23#include <asm/mach-types.h>
23#include <asm/mach/arch.h> 24#include <asm/mach/arch.h>
24#include <asm/mach/pci.h> 25#include <asm/mach/pci.h>
@@ -74,6 +75,15 @@ static struct mv643xx_eth_platform_data rd88f6281_ge00_data = {
74 .duplex = DUPLEX_FULL, 75 .duplex = DUPLEX_FULL,
75}; 76};
76 77
78static struct dsa_platform_data rd88f6281_switch_data = {
79 .port_names[0] = "lan1",
80 .port_names[1] = "lan2",
81 .port_names[2] = "lan3",
82 .port_names[3] = "lan4",
83 .port_names[4] = "wan",
84 .port_names[5] = "cpu",
85};
86
77static struct mv_sata_platform_data rd88f6281_sata_data = { 87static struct mv_sata_platform_data rd88f6281_sata_data = {
78 .n_ports = 2, 88 .n_ports = 2,
79}; 89};
@@ -87,6 +97,7 @@ static void __init rd88f6281_init(void)
87 97
88 kirkwood_ehci_init(); 98 kirkwood_ehci_init();
89 kirkwood_ge00_init(&rd88f6281_ge00_data); 99 kirkwood_ge00_init(&rd88f6281_ge00_data);
100 kirkwood_ge00_switch_init(&rd88f6281_switch_data, NO_IRQ);
90 kirkwood_rtc_init(); 101 kirkwood_rtc_init();
91 kirkwood_sata_init(&rd88f6281_sata_data); 102 kirkwood_sata_init(&rd88f6281_sata_data);
92 kirkwood_uart0_init(); 103 kirkwood_uart0_init();
diff --git a/arch/arm/mach-mv78xx0/db78x00-bp-setup.c b/arch/arm/mach-mv78xx0/db78x00-bp-setup.c
index 49f434c39eb7..2e285bbb7bbd 100644
--- a/arch/arm/mach-mv78xx0/db78x00-bp-setup.c
+++ b/arch/arm/mach-mv78xx0/db78x00-bp-setup.c
@@ -13,6 +13,7 @@
13#include <linux/platform_device.h> 13#include <linux/platform_device.h>
14#include <linux/ata_platform.h> 14#include <linux/ata_platform.h>
15#include <linux/mv643xx_eth.h> 15#include <linux/mv643xx_eth.h>
16#include <linux/ethtool.h>
16#include <mach/mv78xx0.h> 17#include <mach/mv78xx0.h>
17#include <asm/mach-types.h> 18#include <asm/mach-types.h>
18#include <asm/mach/arch.h> 19#include <asm/mach/arch.h>
@@ -28,10 +29,14 @@ static struct mv643xx_eth_platform_data db78x00_ge01_data = {
28 29
29static struct mv643xx_eth_platform_data db78x00_ge10_data = { 30static struct mv643xx_eth_platform_data db78x00_ge10_data = {
30 .phy_addr = MV643XX_ETH_PHY_NONE, 31 .phy_addr = MV643XX_ETH_PHY_NONE,
32 .speed = SPEED_1000,
33 .duplex = DUPLEX_FULL,
31}; 34};
32 35
33static struct mv643xx_eth_platform_data db78x00_ge11_data = { 36static struct mv643xx_eth_platform_data db78x00_ge11_data = {
34 .phy_addr = MV643XX_ETH_PHY_NONE, 37 .phy_addr = MV643XX_ETH_PHY_NONE,
38 .speed = SPEED_1000,
39 .duplex = DUPLEX_FULL,
35}; 40};
36 41
37static struct mv_sata_platform_data db78x00_sata_data = { 42static struct mv_sata_platform_data db78x00_sata_data = {
diff --git a/arch/arm/mach-orion5x/common.c b/arch/arm/mach-orion5x/common.c
index 9625ef5975d0..437065c25c9c 100644
--- a/arch/arm/mach-orion5x/common.c
+++ b/arch/arm/mach-orion5x/common.c
@@ -19,6 +19,7 @@
19#include <linux/mv643xx_i2c.h> 19#include <linux/mv643xx_i2c.h>
20#include <linux/ata_platform.h> 20#include <linux/ata_platform.h>
21#include <linux/spi/orion_spi.h> 21#include <linux/spi/orion_spi.h>
22#include <net/dsa.h>
22#include <asm/page.h> 23#include <asm/page.h>
23#include <asm/setup.h> 24#include <asm/setup.h>
24#include <asm/timex.h> 25#include <asm/timex.h>
@@ -198,6 +199,40 @@ void __init orion5x_eth_init(struct mv643xx_eth_platform_data *eth_data)
198 199
199 200
200/***************************************************************************** 201/*****************************************************************************
202 * Ethernet switch
203 ****************************************************************************/
204static struct resource orion5x_switch_resources[] = {
205 {
206 .start = 0,
207 .end = 0,
208 .flags = IORESOURCE_IRQ,
209 },
210};
211
212static struct platform_device orion5x_switch_device = {
213 .name = "dsa",
214 .id = 0,
215 .num_resources = 0,
216 .resource = orion5x_switch_resources,
217};
218
219void __init orion5x_eth_switch_init(struct dsa_platform_data *d, int irq)
220{
221 if (irq != NO_IRQ) {
222 orion5x_switch_resources[0].start = irq;
223 orion5x_switch_resources[0].end = irq;
224 orion5x_switch_device.num_resources = 1;
225 }
226
227 d->mii_bus = &orion5x_eth_shared.dev;
228 d->netdev = &orion5x_eth.dev;
229 orion5x_switch_device.dev.platform_data = d;
230
231 platform_device_register(&orion5x_switch_device);
232}
233
234
235/*****************************************************************************
201 * I2C 236 * I2C
202 ****************************************************************************/ 237 ****************************************************************************/
203static struct mv64xxx_i2c_pdata orion5x_i2c_pdata = { 238static struct mv64xxx_i2c_pdata orion5x_i2c_pdata = {
@@ -275,7 +310,8 @@ void __init orion5x_sata_init(struct mv_sata_platform_data *sata_data)
275 * SPI 310 * SPI
276 ****************************************************************************/ 311 ****************************************************************************/
277static struct orion_spi_info orion5x_spi_plat_data = { 312static struct orion_spi_info orion5x_spi_plat_data = {
278 .tclk = 0, 313 .tclk = 0,
314 .enable_clock_fix = 1,
279}; 315};
280 316
281static struct resource orion5x_spi_resources[] = { 317static struct resource orion5x_spi_resources[] = {
diff --git a/arch/arm/mach-orion5x/common.h b/arch/arm/mach-orion5x/common.h
index 1f8b2da676a5..a000c7c6ee96 100644
--- a/arch/arm/mach-orion5x/common.h
+++ b/arch/arm/mach-orion5x/common.h
@@ -1,6 +1,7 @@
1#ifndef __ARCH_ORION5X_COMMON_H 1#ifndef __ARCH_ORION5X_COMMON_H
2#define __ARCH_ORION5X_COMMON_H 2#define __ARCH_ORION5X_COMMON_H
3 3
4struct dsa_platform_data;
4struct mv643xx_eth_platform_data; 5struct mv643xx_eth_platform_data;
5struct mv_sata_platform_data; 6struct mv_sata_platform_data;
6 7
@@ -29,6 +30,7 @@ void orion5x_setup_pcie_wa_win(u32 base, u32 size);
29void orion5x_ehci0_init(void); 30void orion5x_ehci0_init(void);
30void orion5x_ehci1_init(void); 31void orion5x_ehci1_init(void);
31void orion5x_eth_init(struct mv643xx_eth_platform_data *eth_data); 32void orion5x_eth_init(struct mv643xx_eth_platform_data *eth_data);
33void orion5x_eth_switch_init(struct dsa_platform_data *d, int irq);
32void orion5x_i2c_init(void); 34void orion5x_i2c_init(void);
33void orion5x_sata_init(struct mv_sata_platform_data *sata_data); 35void orion5x_sata_init(struct mv_sata_platform_data *sata_data);
34void orion5x_spi_init(void); 36void orion5x_spi_init(void);
diff --git a/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c b/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c
index 500cdadaf09c..15f53235ee30 100644
--- a/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c
+++ b/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c
@@ -16,6 +16,7 @@
16#include <linux/mtd/physmap.h> 16#include <linux/mtd/physmap.h>
17#include <linux/mv643xx_eth.h> 17#include <linux/mv643xx_eth.h>
18#include <linux/ethtool.h> 18#include <linux/ethtool.h>
19#include <net/dsa.h>
19#include <asm/mach-types.h> 20#include <asm/mach-types.h>
20#include <asm/gpio.h> 21#include <asm/gpio.h>
21#include <asm/leds.h> 22#include <asm/leds.h>
@@ -93,6 +94,15 @@ static struct mv643xx_eth_platform_data rd88f5181l_fxo_eth_data = {
93 .duplex = DUPLEX_FULL, 94 .duplex = DUPLEX_FULL,
94}; 95};
95 96
97static struct dsa_platform_data rd88f5181l_fxo_switch_data = {
98 .port_names[0] = "lan2",
99 .port_names[1] = "lan1",
100 .port_names[2] = "wan",
101 .port_names[3] = "cpu",
102 .port_names[5] = "lan4",
103 .port_names[7] = "lan3",
104};
105
96static void __init rd88f5181l_fxo_init(void) 106static void __init rd88f5181l_fxo_init(void)
97{ 107{
98 /* 108 /*
@@ -107,6 +117,7 @@ static void __init rd88f5181l_fxo_init(void)
107 */ 117 */
108 orion5x_ehci0_init(); 118 orion5x_ehci0_init();
109 orion5x_eth_init(&rd88f5181l_fxo_eth_data); 119 orion5x_eth_init(&rd88f5181l_fxo_eth_data);
120 orion5x_eth_switch_init(&rd88f5181l_fxo_switch_data, NO_IRQ);
110 orion5x_uart0_init(); 121 orion5x_uart0_init();
111 122
112 orion5x_setup_dev_boot_win(RD88F5181L_FXO_NOR_BOOT_BASE, 123 orion5x_setup_dev_boot_win(RD88F5181L_FXO_NOR_BOOT_BASE,
diff --git a/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c b/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c
index ebde81416499..8ad3934399d4 100644
--- a/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c
+++ b/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c
@@ -17,6 +17,7 @@
17#include <linux/mv643xx_eth.h> 17#include <linux/mv643xx_eth.h>
18#include <linux/ethtool.h> 18#include <linux/ethtool.h>
19#include <linux/i2c.h> 19#include <linux/i2c.h>
20#include <net/dsa.h>
20#include <asm/mach-types.h> 21#include <asm/mach-types.h>
21#include <asm/gpio.h> 22#include <asm/gpio.h>
22#include <asm/leds.h> 23#include <asm/leds.h>
@@ -94,6 +95,15 @@ static struct mv643xx_eth_platform_data rd88f5181l_ge_eth_data = {
94 .duplex = DUPLEX_FULL, 95 .duplex = DUPLEX_FULL,
95}; 96};
96 97
98static struct dsa_platform_data rd88f5181l_ge_switch_data = {
99 .port_names[0] = "lan2",
100 .port_names[1] = "lan1",
101 .port_names[2] = "wan",
102 .port_names[3] = "cpu",
103 .port_names[5] = "lan4",
104 .port_names[7] = "lan3",
105};
106
97static struct i2c_board_info __initdata rd88f5181l_ge_i2c_rtc = { 107static struct i2c_board_info __initdata rd88f5181l_ge_i2c_rtc = {
98 I2C_BOARD_INFO("ds1338", 0x68), 108 I2C_BOARD_INFO("ds1338", 0x68),
99}; 109};
@@ -112,6 +122,7 @@ static void __init rd88f5181l_ge_init(void)
112 */ 122 */
113 orion5x_ehci0_init(); 123 orion5x_ehci0_init();
114 orion5x_eth_init(&rd88f5181l_ge_eth_data); 124 orion5x_eth_init(&rd88f5181l_ge_eth_data);
125 orion5x_eth_switch_init(&rd88f5181l_ge_switch_data, gpio_to_irq(8));
115 orion5x_i2c_init(); 126 orion5x_i2c_init();
116 orion5x_uart0_init(); 127 orion5x_uart0_init();
117 128
diff --git a/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c b/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c
index 40e049539091..262e25e4dace 100644
--- a/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c
+++ b/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c
@@ -19,6 +19,7 @@
19#include <linux/spi/orion_spi.h> 19#include <linux/spi/orion_spi.h>
20#include <linux/spi/flash.h> 20#include <linux/spi/flash.h>
21#include <linux/ethtool.h> 21#include <linux/ethtool.h>
22#include <net/dsa.h>
22#include <asm/mach-types.h> 23#include <asm/mach-types.h>
23#include <asm/gpio.h> 24#include <asm/gpio.h>
24#include <asm/leds.h> 25#include <asm/leds.h>
@@ -34,6 +35,15 @@ static struct mv643xx_eth_platform_data rd88f6183ap_ge_eth_data = {
34 .duplex = DUPLEX_FULL, 35 .duplex = DUPLEX_FULL,
35}; 36};
36 37
38static struct dsa_platform_data rd88f6183ap_ge_switch_data = {
39 .port_names[0] = "lan1",
40 .port_names[1] = "lan2",
41 .port_names[2] = "lan3",
42 .port_names[3] = "lan4",
43 .port_names[4] = "wan",
44 .port_names[5] = "cpu",
45};
46
37static struct mtd_partition rd88f6183ap_ge_partitions[] = { 47static struct mtd_partition rd88f6183ap_ge_partitions[] = {
38 { 48 {
39 .name = "kernel", 49 .name = "kernel",
@@ -79,6 +89,7 @@ static void __init rd88f6183ap_ge_init(void)
79 */ 89 */
80 orion5x_ehci0_init(); 90 orion5x_ehci0_init();
81 orion5x_eth_init(&rd88f6183ap_ge_eth_data); 91 orion5x_eth_init(&rd88f6183ap_ge_eth_data);
92 orion5x_eth_switch_init(&rd88f6183ap_ge_switch_data, gpio_to_irq(3));
82 spi_register_board_info(rd88f6183ap_ge_spi_slave_info, 93 spi_register_board_info(rd88f6183ap_ge_spi_slave_info,
83 ARRAY_SIZE(rd88f6183ap_ge_spi_slave_info)); 94 ARRAY_SIZE(rd88f6183ap_ge_spi_slave_info));
84 orion5x_spi_init(); 95 orion5x_spi_init();
diff --git a/arch/arm/mach-orion5x/wrt350n-v2-setup.c b/arch/arm/mach-orion5x/wrt350n-v2-setup.c
index 9a4fd5256462..cc8f89200865 100644
--- a/arch/arm/mach-orion5x/wrt350n-v2-setup.c
+++ b/arch/arm/mach-orion5x/wrt350n-v2-setup.c
@@ -15,6 +15,7 @@
15#include <linux/mtd/physmap.h> 15#include <linux/mtd/physmap.h>
16#include <linux/mv643xx_eth.h> 16#include <linux/mv643xx_eth.h>
17#include <linux/ethtool.h> 17#include <linux/ethtool.h>
18#include <net/dsa.h>
18#include <asm/mach-types.h> 19#include <asm/mach-types.h>
19#include <asm/gpio.h> 20#include <asm/gpio.h>
20#include <asm/mach/arch.h> 21#include <asm/mach/arch.h>
@@ -105,6 +106,15 @@ static struct mv643xx_eth_platform_data wrt350n_v2_eth_data = {
105 .duplex = DUPLEX_FULL, 106 .duplex = DUPLEX_FULL,
106}; 107};
107 108
109static struct dsa_platform_data wrt350n_v2_switch_data = {
110 .port_names[0] = "lan2",
111 .port_names[1] = "lan1",
112 .port_names[2] = "wan",
113 .port_names[3] = "cpu",
114 .port_names[5] = "lan3",
115 .port_names[7] = "lan4",
116};
117
108static void __init wrt350n_v2_init(void) 118static void __init wrt350n_v2_init(void)
109{ 119{
110 /* 120 /*
@@ -119,6 +129,7 @@ static void __init wrt350n_v2_init(void)
119 */ 129 */
120 orion5x_ehci0_init(); 130 orion5x_ehci0_init();
121 orion5x_eth_init(&wrt350n_v2_eth_data); 131 orion5x_eth_init(&wrt350n_v2_eth_data);
132 orion5x_eth_switch_init(&wrt350n_v2_switch_data, NO_IRQ);
122 orion5x_uart0_init(); 133 orion5x_uart0_init();
123 134
124 orion5x_setup_dev_boot_win(WRT350N_V2_NOR_BOOT_BASE, 135 orion5x_setup_dev_boot_win(WRT350N_V2_NOR_BOOT_BASE,
diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig
index f27f6b3d6e6f..f781873431f3 100644
--- a/arch/arm/mach-pxa/Kconfig
+++ b/arch/arm/mach-pxa/Kconfig
@@ -257,7 +257,6 @@ config MACH_ARMCORE
257 bool "CompuLab CM-X255/CM-X270 modules" 257 bool "CompuLab CM-X255/CM-X270 modules"
258 select PXA27x 258 select PXA27x
259 select IWMMXT 259 select IWMMXT
260 select ZONE_DMA if PCI
261 select PXA25x 260 select PXA25x
262 select PXA_SSP 261 select PXA_SSP
263 262
diff --git a/arch/arm/mach-pxa/include/mach/irqs.h b/arch/arm/mach-pxa/include/mach/irqs.h
index 9c163e19ada9..32bb4a2eb7f1 100644
--- a/arch/arm/mach-pxa/include/mach/irqs.h
+++ b/arch/arm/mach-pxa/include/mach/irqs.h
@@ -9,7 +9,8 @@
9 * it under the terms of the GNU General Public License version 2 as 9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
11 */ 11 */
12 12#ifndef __ASM_MACH_IRQS_H
13#define __ASM_MACH_IRQS_H
13 14
14#ifdef CONFIG_PXA_HAVE_ISA_IRQS 15#ifdef CONFIG_PXA_HAVE_ISA_IRQS
15#define PXA_ISA_IRQ(x) (x) 16#define PXA_ISA_IRQ(x) (x)
@@ -264,3 +265,5 @@
264#endif 265#endif
265 266
266#endif /* CONFIG_PCI_HOST_ITE8152 */ 267#endif /* CONFIG_PCI_HOST_ITE8152 */
268
269#endif /* __ASM_MACH_IRQS_H */
diff --git a/arch/arm/mach-pxa/include/mach/spitz.h b/arch/arm/mach-pxa/include/mach/spitz.h
index 31ac26b55bc1..e8488dfb7e91 100644
--- a/arch/arm/mach-pxa/include/mach/spitz.h
+++ b/arch/arm/mach-pxa/include/mach/spitz.h
@@ -142,7 +142,7 @@
142 142
143#define SPITZ_SCP2_GPIO_BASE (NR_BUILTIN_GPIO + 12) 143#define SPITZ_SCP2_GPIO_BASE (NR_BUILTIN_GPIO + 12)
144#define SPITZ_GPIO_IR_ON (SPITZ_SCP2_GPIO_BASE + 0) 144#define SPITZ_GPIO_IR_ON (SPITZ_SCP2_GPIO_BASE + 0)
145#define SPITZ_GPIO_AKIN_PULLUP (SPITZ_SCP2_GPIO_BASE + 1 145#define SPITZ_GPIO_AKIN_PULLUP (SPITZ_SCP2_GPIO_BASE + 1)
146#define SPITZ_GPIO_RESERVED_1 (SPITZ_SCP2_GPIO_BASE + 2) 146#define SPITZ_GPIO_RESERVED_1 (SPITZ_SCP2_GPIO_BASE + 2)
147#define SPITZ_GPIO_RESERVED_2 (SPITZ_SCP2_GPIO_BASE + 3) 147#define SPITZ_GPIO_RESERVED_2 (SPITZ_SCP2_GPIO_BASE + 3)
148#define SPITZ_GPIO_RESERVED_3 (SPITZ_SCP2_GPIO_BASE + 4) 148#define SPITZ_GPIO_RESERVED_3 (SPITZ_SCP2_GPIO_BASE + 4)
diff --git a/arch/arm/mach-pxa/pwm.c b/arch/arm/mach-pxa/pwm.c
index 316cd986da5c..74e2ead8cee8 100644
--- a/arch/arm/mach-pxa/pwm.c
+++ b/arch/arm/mach-pxa/pwm.c
@@ -60,7 +60,7 @@ int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
60 do_div(c, 1000000000); 60 do_div(c, 1000000000);
61 period_cycles = c; 61 period_cycles = c;
62 62
63 if (period_cycles < 0) 63 if (period_cycles < 1)
64 period_cycles = 1; 64 period_cycles = 1;
65 prescale = (period_cycles - 1) / 1024; 65 prescale = (period_cycles - 1) / 1024;
66 pv = period_cycles / (prescale + 1) - 1; 66 pv = period_cycles / (prescale + 1) - 1;
diff --git a/arch/arm/mach-pxa/trizeps4.c b/arch/arm/mach-pxa/trizeps4.c
index a13dbf3c2c05..a72e3add743c 100644
--- a/arch/arm/mach-pxa/trizeps4.c
+++ b/arch/arm/mach-pxa/trizeps4.c
@@ -399,7 +399,7 @@ static void trizeps4_irda_transceiver_mode(struct device *dev, int mode)
399 /* Switch mode */ 399 /* Switch mode */
400 if (mode & IR_SIRMODE) 400 if (mode & IR_SIRMODE)
401 trizeps_conxs_ircr &= ~ConXS_IRCR_MODE; /* Slow mode */ 401 trizeps_conxs_ircr &= ~ConXS_IRCR_MODE; /* Slow mode */
402 else if (mode & IR_FIRMODE) { 402 else if (mode & IR_FIRMODE)
403 trizeps_conxs_ircr |= ConXS_IRCR_MODE; /* Fast mode */ 403 trizeps_conxs_ircr |= ConXS_IRCR_MODE; /* Fast mode */
404 404
405 /* Switch power */ 405 /* Switch power */
diff --git a/arch/arm/mach-s3c2443/clock.c b/arch/arm/mach-s3c2443/clock.c
index 2f60bf6b8d43..f854e7385e3c 100644
--- a/arch/arm/mach-s3c2443/clock.c
+++ b/arch/arm/mach-s3c2443/clock.c
@@ -1033,8 +1033,7 @@ void __init s3c2443_init_clocks(int xtal)
1033 1033
1034 fclk = pll / s3c2443_fclk_div(clkdiv0); 1034 fclk = pll / s3c2443_fclk_div(clkdiv0);
1035 hclk = s3c2443_prediv_getrate(&clk_prediv); 1035 hclk = s3c2443_prediv_getrate(&clk_prediv);
1036 hclk = hclk / s3c2443_get_hdiv(clkdiv0); 1036 hclk /= s3c2443_get_hdiv(clkdiv0);
1037 hclk = hclk / ((clkdiv0 & S3C2443_CLKDIV0_HALF_HCLK) ? 2 : 1);
1038 pclk = hclk / ((clkdiv0 & S3C2443_CLKDIV0_HALF_PCLK) ? 2 : 1); 1037 pclk = hclk / ((clkdiv0 & S3C2443_CLKDIV0_HALF_PCLK) ? 2 : 1);
1039 1038
1040 s3c24xx_setup_clocks(xtal, fclk, hclk, pclk); 1039 s3c24xx_setup_clocks(xtal, fclk, hclk, pclk);
diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S
index 33926c9fcda6..5786adf10040 100644
--- a/arch/arm/mm/cache-v4.S
+++ b/arch/arm/mm/cache-v4.S
@@ -29,7 +29,7 @@ ENTRY(v4_flush_user_cache_all)
29 * Clean and invalidate the entire cache. 29 * Clean and invalidate the entire cache.
30 */ 30 */
31ENTRY(v4_flush_kern_cache_all) 31ENTRY(v4_flush_kern_cache_all)
32#ifdef CPU_CP15 32#ifdef CONFIG_CPU_CP15
33 mov r0, #0 33 mov r0, #0
34 mcr p15, 0, r0, c7, c7, 0 @ flush ID cache 34 mcr p15, 0, r0, c7, c7, 0 @ flush ID cache
35 mov pc, lr 35 mov pc, lr
@@ -48,7 +48,7 @@ ENTRY(v4_flush_kern_cache_all)
48 * - flags - vma_area_struct flags describing address space 48 * - flags - vma_area_struct flags describing address space
49 */ 49 */
50ENTRY(v4_flush_user_cache_range) 50ENTRY(v4_flush_user_cache_range)
51#ifdef CPU_CP15 51#ifdef CONFIG_CPU_CP15
52 mov ip, #0 52 mov ip, #0
53 mcreq p15, 0, ip, c7, c7, 0 @ flush ID cache 53 mcreq p15, 0, ip, c7, c7, 0 @ flush ID cache
54 mov pc, lr 54 mov pc, lr
@@ -116,7 +116,7 @@ ENTRY(v4_dma_inv_range)
116 * - end - virtual end address 116 * - end - virtual end address
117 */ 117 */
118ENTRY(v4_dma_flush_range) 118ENTRY(v4_dma_flush_range)
119#ifdef CPU_CP15 119#ifdef CONFIG_CPU_CP15
120 mov r0, #0 120 mov r0, #0
121 mcr p15, 0, r0, c7, c7, 0 @ flush ID cache 121 mcr p15, 0, r0, c7, c7, 0 @ flush ID cache
122#endif 122#endif
diff --git a/arch/arm/plat-s3c24xx/pwm-clock.c b/arch/arm/plat-s3c24xx/pwm-clock.c
index b8e854f1b1d5..3fad68a1e6bc 100644
--- a/arch/arm/plat-s3c24xx/pwm-clock.c
+++ b/arch/arm/plat-s3c24xx/pwm-clock.c
@@ -315,7 +315,7 @@ static int clk_pwm_tin_set_parent(struct clk *clk, struct clk *parent)
315 if (parent == s3c24xx_pwmclk_tclk(id)) 315 if (parent == s3c24xx_pwmclk_tclk(id))
316 bits = S3C2410_TCFG1_MUX_TCLK << shift; 316 bits = S3C2410_TCFG1_MUX_TCLK << shift;
317 else if (parent == s3c24xx_pwmclk_tdiv(id)) 317 else if (parent == s3c24xx_pwmclk_tdiv(id))
318 bits = clk_pwm_tdiv_bits(to_tdiv(clk)) << shift; 318 bits = clk_pwm_tdiv_bits(to_tdiv(parent)) << shift;
319 else 319 else
320 return -EINVAL; 320 return -EINVAL;
321 321
diff --git a/arch/arm/plat-s3c24xx/pwm.c b/arch/arm/plat-s3c24xx/pwm.c
index feb770f2e84e..ec56b88866c4 100644
--- a/arch/arm/plat-s3c24xx/pwm.c
+++ b/arch/arm/plat-s3c24xx/pwm.c
@@ -56,7 +56,7 @@ static struct clk *clk_scaler[2];
56 } \ 56 } \
57 } 57 }
58 58
59#define DEFINE_TIMER(_tmr_no, _irq) \ 59#define DEFINE_S3C_TIMER(_tmr_no, _irq) \
60 .name = "s3c24xx-pwm", \ 60 .name = "s3c24xx-pwm", \
61 .id = _tmr_no, \ 61 .id = _tmr_no, \
62 .num_resources = TIMER_RESOURCE_SIZE, \ 62 .num_resources = TIMER_RESOURCE_SIZE, \
@@ -67,11 +67,11 @@ static struct clk *clk_scaler[2];
67 */ 67 */
68 68
69struct platform_device s3c_device_timer[] = { 69struct platform_device s3c_device_timer[] = {
70 [0] = { DEFINE_TIMER(0, IRQ_TIMER0) }, 70 [0] = { DEFINE_S3C_TIMER(0, IRQ_TIMER0) },
71 [1] = { DEFINE_TIMER(1, IRQ_TIMER1) }, 71 [1] = { DEFINE_S3C_TIMER(1, IRQ_TIMER1) },
72 [2] = { DEFINE_TIMER(2, IRQ_TIMER2) }, 72 [2] = { DEFINE_S3C_TIMER(2, IRQ_TIMER2) },
73 [3] = { DEFINE_TIMER(3, IRQ_TIMER3) }, 73 [3] = { DEFINE_S3C_TIMER(3, IRQ_TIMER3) },
74 [4] = { DEFINE_TIMER(4, IRQ_TIMER4) }, 74 [4] = { DEFINE_S3C_TIMER(4, IRQ_TIMER4) },
75}; 75};
76 76
77static inline int pwm_is_tdiv(struct pwm_device *pwm) 77static inline int pwm_is_tdiv(struct pwm_device *pwm)
diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig
index 07335e719bf8..b17aeea8d620 100644
--- a/arch/cris/Kconfig
+++ b/arch/cris/Kconfig
@@ -679,6 +679,8 @@ source "fs/Kconfig"
679 679
680source "drivers/usb/Kconfig" 680source "drivers/usb/Kconfig"
681 681
682source "drivers/uwb/Kconfig"
683
682source "arch/cris/Kconfig.debug" 684source "arch/cris/Kconfig.debug"
683 685
684source "security/Kconfig" 686source "security/Kconfig"
diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig
index bd1995403c67..28f06fd9b7b7 100644
--- a/arch/h8300/Kconfig
+++ b/arch/h8300/Kconfig
@@ -216,6 +216,8 @@ source "drivers/hwmon/Kconfig"
216 216
217source "drivers/usb/Kconfig" 217source "drivers/usb/Kconfig"
218 218
219source "drivers/uwb/Kconfig"
220
219endmenu 221endmenu
220 222
221source "fs/Kconfig" 223source "fs/Kconfig"
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 912c57db2d21..27eec71429b0 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -23,6 +23,7 @@ config IA64
23 select HAVE_KRETPROBES 23 select HAVE_KRETPROBES
24 select HAVE_DMA_ATTRS 24 select HAVE_DMA_ATTRS
25 select HAVE_KVM 25 select HAVE_KVM
26 select HAVE_ARCH_TRACEHOOK
26 default y 27 default y
27 help 28 help
28 The Itanium Processor Family is Intel's 64-bit successor to 29 The Itanium Processor Family is Intel's 64-bit successor to
@@ -110,6 +111,33 @@ config AUDIT_ARCH
110 bool 111 bool
111 default y 112 default y
112 113
114menuconfig PARAVIRT_GUEST
115 bool "Paravirtualized guest support"
116 help
117 Say Y here to get to see options related to running Linux under
118 various hypervisors. This option alone does not add any kernel code.
119
120 If you say N, all options in this submenu will be skipped and disabled.
121
122if PARAVIRT_GUEST
123
124config PARAVIRT
125 bool "Enable paravirtualization code"
126 depends on PARAVIRT_GUEST
127 default y
128 bool
129 default y
130 help
131 This changes the kernel so it can modify itself when it is run
132 under a hypervisor, potentially improving performance significantly
133 over full virtualization. However, when run without a hypervisor
134 the kernel is theoretically slower and slightly larger.
135
136
137source "arch/ia64/xen/Kconfig"
138
139endif
140
113choice 141choice
114 prompt "System type" 142 prompt "System type"
115 default IA64_GENERIC 143 default IA64_GENERIC
@@ -119,6 +147,7 @@ config IA64_GENERIC
119 select NUMA 147 select NUMA
120 select ACPI_NUMA 148 select ACPI_NUMA
121 select SWIOTLB 149 select SWIOTLB
150 select PCI_MSI
122 help 151 help
123 This selects the system type of your hardware. A "generic" kernel 152 This selects the system type of your hardware. A "generic" kernel
124 will run on any supported IA-64 system. However, if you configure 153 will run on any supported IA-64 system. However, if you configure
@@ -126,11 +155,13 @@ config IA64_GENERIC
126 155
127 generic For any supported IA-64 system 156 generic For any supported IA-64 system
128 DIG-compliant For DIG ("Developer's Interface Guide") compliant systems 157 DIG-compliant For DIG ("Developer's Interface Guide") compliant systems
158 DIG+Intel+IOMMU For DIG systems with Intel IOMMU
129 HP-zx1/sx1000 For HP systems 159 HP-zx1/sx1000 For HP systems
130 HP-zx1/sx1000+swiotlb For HP systems with (broken) DMA-constrained devices. 160 HP-zx1/sx1000+swiotlb For HP systems with (broken) DMA-constrained devices.
131 SGI-SN2 For SGI Altix systems 161 SGI-SN2 For SGI Altix systems
132 SGI-UV For SGI UV systems 162 SGI-UV For SGI UV systems
133 Ski-simulator For the HP simulator <http://www.hpl.hp.com/research/linux/ski/> 163 Ski-simulator For the HP simulator <http://www.hpl.hp.com/research/linux/ski/>
164 Xen-domU For xen domU system
134 165
135 If you don't know what to do, choose "generic". 166 If you don't know what to do, choose "generic".
136 167
@@ -138,6 +169,11 @@ config IA64_DIG
138 bool "DIG-compliant" 169 bool "DIG-compliant"
139 select SWIOTLB 170 select SWIOTLB
140 171
172config IA64_DIG_VTD
173 bool "DIG+Intel+IOMMU"
174 select DMAR
175 select PCI_MSI
176
141config IA64_HP_ZX1 177config IA64_HP_ZX1
142 bool "HP-zx1/sx1000" 178 bool "HP-zx1/sx1000"
143 help 179 help
@@ -181,6 +217,10 @@ config IA64_HP_SIM
181 bool "Ski-simulator" 217 bool "Ski-simulator"
182 select SWIOTLB 218 select SWIOTLB
183 219
220config IA64_XEN_GUEST
221 bool "Xen guest"
222 depends on XEN
223
184endchoice 224endchoice
185 225
186choice 226choice
@@ -583,6 +623,16 @@ source "drivers/pci/hotplug/Kconfig"
583 623
584source "drivers/pcmcia/Kconfig" 624source "drivers/pcmcia/Kconfig"
585 625
626config DMAR
627 bool "Support for DMA Remapping Devices (EXPERIMENTAL)"
628 depends on IA64_GENERIC && ACPI && EXPERIMENTAL
629 help
630 DMA remapping (DMAR) devices support enables independent address
631 translations for Direct Memory Access (DMA) from devices.
632 These DMA remapping devices are reported via ACPI tables
633 and include PCI device scope covered by these DMA
634 remapping devices.
635
586endmenu 636endmenu
587 637
588endif 638endif
diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
index 905d25b13d5a..58a7e46affda 100644
--- a/arch/ia64/Makefile
+++ b/arch/ia64/Makefile
@@ -53,12 +53,15 @@ libs-y += arch/ia64/lib/
53core-y += arch/ia64/kernel/ arch/ia64/mm/ 53core-y += arch/ia64/kernel/ arch/ia64/mm/
54core-$(CONFIG_IA32_SUPPORT) += arch/ia64/ia32/ 54core-$(CONFIG_IA32_SUPPORT) += arch/ia64/ia32/
55core-$(CONFIG_IA64_DIG) += arch/ia64/dig/ 55core-$(CONFIG_IA64_DIG) += arch/ia64/dig/
56core-$(CONFIG_IA64_DIG_VTD) += arch/ia64/dig/
56core-$(CONFIG_IA64_GENERIC) += arch/ia64/dig/ 57core-$(CONFIG_IA64_GENERIC) += arch/ia64/dig/
57core-$(CONFIG_IA64_HP_ZX1) += arch/ia64/dig/ 58core-$(CONFIG_IA64_HP_ZX1) += arch/ia64/dig/
58core-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += arch/ia64/dig/ 59core-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += arch/ia64/dig/
60core-$(CONFIG_IA64_XEN_GUEST) += arch/ia64/dig/
59core-$(CONFIG_IA64_SGI_SN2) += arch/ia64/sn/ 61core-$(CONFIG_IA64_SGI_SN2) += arch/ia64/sn/
60core-$(CONFIG_IA64_SGI_UV) += arch/ia64/uv/ 62core-$(CONFIG_IA64_SGI_UV) += arch/ia64/uv/
61core-$(CONFIG_KVM) += arch/ia64/kvm/ 63core-$(CONFIG_KVM) += arch/ia64/kvm/
64core-$(CONFIG_XEN) += arch/ia64/xen/
62 65
63drivers-$(CONFIG_PCI) += arch/ia64/pci/ 66drivers-$(CONFIG_PCI) += arch/ia64/pci/
64drivers-$(CONFIG_IA64_HP_SIM) += arch/ia64/hp/sim/ 67drivers-$(CONFIG_IA64_HP_SIM) += arch/ia64/hp/sim/
diff --git a/arch/ia64/configs/generic_defconfig b/arch/ia64/configs/generic_defconfig
index 9f483976228f..e05f9e1d3faa 100644
--- a/arch/ia64/configs/generic_defconfig
+++ b/arch/ia64/configs/generic_defconfig
@@ -233,6 +233,8 @@ CONFIG_DMIID=y
233CONFIG_BINFMT_ELF=y 233CONFIG_BINFMT_ELF=y
234CONFIG_BINFMT_MISC=m 234CONFIG_BINFMT_MISC=m
235 235
236# CONFIG_DMAR is not set
237
236# 238#
237# Power management and ACPI 239# Power management and ACPI
238# 240#
diff --git a/arch/ia64/configs/tiger_defconfig b/arch/ia64/configs/tiger_defconfig
index 797acf9066c1..c522edf23c62 100644
--- a/arch/ia64/configs/tiger_defconfig
+++ b/arch/ia64/configs/tiger_defconfig
@@ -172,6 +172,8 @@ CONFIG_DMIID=y
172CONFIG_BINFMT_ELF=y 172CONFIG_BINFMT_ELF=y
173CONFIG_BINFMT_MISC=m 173CONFIG_BINFMT_MISC=m
174 174
175# CONFIG_DMAR is not set
176
175# 177#
176# Power management and ACPI 178# Power management and ACPI
177# 179#
diff --git a/arch/ia64/dig/Makefile b/arch/ia64/dig/Makefile
index 971cd7870dd4..5c0283830bd6 100644
--- a/arch/ia64/dig/Makefile
+++ b/arch/ia64/dig/Makefile
@@ -6,4 +6,9 @@
6# 6#
7 7
8obj-y := setup.o 8obj-y := setup.o
9ifeq ($(CONFIG_DMAR), y)
10obj-$(CONFIG_IA64_GENERIC) += machvec.o machvec_vtd.o dig_vtd_iommu.o
11else
9obj-$(CONFIG_IA64_GENERIC) += machvec.o 12obj-$(CONFIG_IA64_GENERIC) += machvec.o
13endif
14obj-$(CONFIG_IA64_DIG_VTD) += dig_vtd_iommu.o
diff --git a/arch/ia64/dig/dig_vtd_iommu.c b/arch/ia64/dig/dig_vtd_iommu.c
new file mode 100644
index 000000000000..1c8a079017a3
--- /dev/null
+++ b/arch/ia64/dig/dig_vtd_iommu.c
@@ -0,0 +1,59 @@
1#include <linux/types.h>
2#include <linux/kernel.h>
3#include <linux/module.h>
4#include <linux/intel-iommu.h>
5
6void *
7vtd_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
8 gfp_t flags)
9{
10 return intel_alloc_coherent(dev, size, dma_handle, flags);
11}
12EXPORT_SYMBOL_GPL(vtd_alloc_coherent);
13
14void
15vtd_free_coherent(struct device *dev, size_t size, void *vaddr,
16 dma_addr_t dma_handle)
17{
18 intel_free_coherent(dev, size, vaddr, dma_handle);
19}
20EXPORT_SYMBOL_GPL(vtd_free_coherent);
21
22dma_addr_t
23vtd_map_single_attrs(struct device *dev, void *addr, size_t size,
24 int dir, struct dma_attrs *attrs)
25{
26 return intel_map_single(dev, (phys_addr_t)addr, size, dir);
27}
28EXPORT_SYMBOL_GPL(vtd_map_single_attrs);
29
30void
31vtd_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
32 int dir, struct dma_attrs *attrs)
33{
34 intel_unmap_single(dev, iova, size, dir);
35}
36EXPORT_SYMBOL_GPL(vtd_unmap_single_attrs);
37
38int
39vtd_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
40 int dir, struct dma_attrs *attrs)
41{
42 return intel_map_sg(dev, sglist, nents, dir);
43}
44EXPORT_SYMBOL_GPL(vtd_map_sg_attrs);
45
46void
47vtd_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
48 int nents, int dir, struct dma_attrs *attrs)
49{
50 intel_unmap_sg(dev, sglist, nents, dir);
51}
52EXPORT_SYMBOL_GPL(vtd_unmap_sg_attrs);
53
54int
55vtd_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
56{
57 return 0;
58}
59EXPORT_SYMBOL_GPL(vtd_dma_mapping_error);
diff --git a/arch/ia64/dig/machvec_vtd.c b/arch/ia64/dig/machvec_vtd.c
new file mode 100644
index 000000000000..7cd3eb471cad
--- /dev/null
+++ b/arch/ia64/dig/machvec_vtd.c
@@ -0,0 +1,3 @@
1#define MACHVEC_PLATFORM_NAME dig_vtd
2#define MACHVEC_PLATFORM_HEADER <asm/machvec_dig_vtd.h>
3#include <asm/machvec_init.h>
diff --git a/arch/ia64/ia32/ia32_entry.S b/arch/ia64/ia32/ia32_entry.S
index 53505bb04771..a8cf19958850 100644
--- a/arch/ia64/ia32/ia32_entry.S
+++ b/arch/ia64/ia32/ia32_entry.S
@@ -108,6 +108,11 @@ GLOBAL_ENTRY(ia32_trace_syscall)
108 ;; 108 ;;
109 st8 [r2]=r3 // initialize return code to -ENOSYS 109 st8 [r2]=r3 // initialize return code to -ENOSYS
110 br.call.sptk.few rp=syscall_trace_enter // give parent a chance to catch syscall args 110 br.call.sptk.few rp=syscall_trace_enter // give parent a chance to catch syscall args
111 cmp.lt p6,p0=r8,r0 // check tracehook
112 adds r2=IA64_PT_REGS_R8_OFFSET+16,sp // r2 = &pt_regs.r8
113 ;;
114(p6) st8.spill [r2]=r8 // store return value in slot for r8
115(p6) br.spnt.few .ret4
111.ret2: // Need to reload arguments (they may be changed by the tracing process) 116.ret2: // Need to reload arguments (they may be changed by the tracing process)
112 adds r2=IA64_PT_REGS_R1_OFFSET+16,sp // r2 = &pt_regs.r1 117 adds r2=IA64_PT_REGS_R1_OFFSET+16,sp // r2 = &pt_regs.r1
113 adds r3=IA64_PT_REGS_R13_OFFSET+16,sp // r3 = &pt_regs.r13 118 adds r3=IA64_PT_REGS_R13_OFFSET+16,sp // r3 = &pt_regs.r13
@@ -199,10 +204,10 @@ ia32_syscall_table:
199 data8 sys_setuid /* 16-bit version */ 204 data8 sys_setuid /* 16-bit version */
200 data8 sys_getuid /* 16-bit version */ 205 data8 sys_getuid /* 16-bit version */
201 data8 compat_sys_stime /* 25 */ 206 data8 compat_sys_stime /* 25 */
202 data8 sys32_ptrace 207 data8 compat_sys_ptrace
203 data8 sys32_alarm 208 data8 sys32_alarm
204 data8 sys_ni_syscall 209 data8 sys_ni_syscall
205 data8 sys32_pause 210 data8 sys_pause
206 data8 compat_sys_utime /* 30 */ 211 data8 compat_sys_utime /* 30 */
207 data8 sys_ni_syscall /* old stty syscall holder */ 212 data8 sys_ni_syscall /* old stty syscall holder */
208 data8 sys_ni_syscall /* old gtty syscall holder */ 213 data8 sys_ni_syscall /* old gtty syscall holder */
@@ -215,7 +220,7 @@ ia32_syscall_table:
215 data8 sys_mkdir 220 data8 sys_mkdir
216 data8 sys_rmdir /* 40 */ 221 data8 sys_rmdir /* 40 */
217 data8 sys_dup 222 data8 sys_dup
218 data8 sys32_pipe 223 data8 sys_pipe
219 data8 compat_sys_times 224 data8 compat_sys_times
220 data8 sys_ni_syscall /* old prof syscall holder */ 225 data8 sys_ni_syscall /* old prof syscall holder */
221 data8 sys32_brk /* 45 */ 226 data8 sys32_brk /* 45 */
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
index f4430bb4bbdc..5e92ae00bdbb 100644
--- a/arch/ia64/ia32/sys_ia32.c
+++ b/arch/ia64/ia32/sys_ia32.c
@@ -1098,21 +1098,6 @@ sys32_mremap (unsigned int addr, unsigned int old_len, unsigned int new_len,
1098 return ret; 1098 return ret;
1099} 1099}
1100 1100
1101asmlinkage long
1102sys32_pipe (int __user *fd)
1103{
1104 int retval;
1105 int fds[2];
1106
1107 retval = do_pipe_flags(fds, 0);
1108 if (retval)
1109 goto out;
1110 if (copy_to_user(fd, fds, sizeof(fds)))
1111 retval = -EFAULT;
1112 out:
1113 return retval;
1114}
1115
1116asmlinkage unsigned long 1101asmlinkage unsigned long
1117sys32_alarm (unsigned int seconds) 1102sys32_alarm (unsigned int seconds)
1118{ 1103{
@@ -1209,25 +1194,6 @@ sys32_waitpid (int pid, unsigned int *stat_addr, int options)
1209 return compat_sys_wait4(pid, stat_addr, options, NULL); 1194 return compat_sys_wait4(pid, stat_addr, options, NULL);
1210} 1195}
1211 1196
1212static unsigned int
1213ia32_peek (struct task_struct *child, unsigned long addr, unsigned int *val)
1214{
1215 size_t copied;
1216 unsigned int ret;
1217
1218 copied = access_process_vm(child, addr, val, sizeof(*val), 0);
1219 return (copied != sizeof(ret)) ? -EIO : 0;
1220}
1221
1222static unsigned int
1223ia32_poke (struct task_struct *child, unsigned long addr, unsigned int val)
1224{
1225
1226 if (access_process_vm(child, addr, &val, sizeof(val), 1) != sizeof(val))
1227 return -EIO;
1228 return 0;
1229}
1230
1231/* 1197/*
1232 * The order in which registers are stored in the ptrace regs structure 1198 * The order in which registers are stored in the ptrace regs structure
1233 */ 1199 */
@@ -1525,49 +1491,15 @@ restore_ia32_fpxstate (struct task_struct *tsk, struct ia32_user_fxsr_struct __u
1525 return 0; 1491 return 0;
1526} 1492}
1527 1493
1528asmlinkage long 1494long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
1529sys32_ptrace (int request, pid_t pid, unsigned int addr, unsigned int data) 1495 compat_ulong_t caddr, compat_ulong_t cdata)
1530{ 1496{
1531 struct task_struct *child; 1497 unsigned long addr = caddr;
1532 unsigned int value, tmp; 1498 unsigned long data = cdata;
1499 unsigned int tmp;
1533 long i, ret; 1500 long i, ret;
1534 1501
1535 lock_kernel();
1536 if (request == PTRACE_TRACEME) {
1537 ret = ptrace_traceme();
1538 goto out;
1539 }
1540
1541 child = ptrace_get_task_struct(pid);
1542 if (IS_ERR(child)) {
1543 ret = PTR_ERR(child);
1544 goto out;
1545 }
1546
1547 if (request == PTRACE_ATTACH) {
1548 ret = sys_ptrace(request, pid, addr, data);
1549 goto out_tsk;
1550 }
1551
1552 ret = ptrace_check_attach(child, request == PTRACE_KILL);
1553 if (ret < 0)
1554 goto out_tsk;
1555
1556 switch (request) { 1502 switch (request) {
1557 case PTRACE_PEEKTEXT:
1558 case PTRACE_PEEKDATA: /* read word at location addr */
1559 ret = ia32_peek(child, addr, &value);
1560 if (ret == 0)
1561 ret = put_user(value, (unsigned int __user *) compat_ptr(data));
1562 else
1563 ret = -EIO;
1564 goto out_tsk;
1565
1566 case PTRACE_POKETEXT:
1567 case PTRACE_POKEDATA: /* write the word at location addr */
1568 ret = ia32_poke(child, addr, data);
1569 goto out_tsk;
1570
1571 case PTRACE_PEEKUSR: /* read word at addr in USER area */ 1503 case PTRACE_PEEKUSR: /* read word at addr in USER area */
1572 ret = -EIO; 1504 ret = -EIO;
1573 if ((addr & 3) || addr > 17*sizeof(int)) 1505 if ((addr & 3) || addr > 17*sizeof(int))
@@ -1632,27 +1564,9 @@ sys32_ptrace (int request, pid_t pid, unsigned int addr, unsigned int data)
1632 compat_ptr(data)); 1564 compat_ptr(data));
1633 break; 1565 break;
1634 1566
1635 case PTRACE_GETEVENTMSG:
1636 ret = put_user(child->ptrace_message, (unsigned int __user *) compat_ptr(data));
1637 break;
1638
1639 case PTRACE_SYSCALL: /* continue, stop after next syscall */
1640 case PTRACE_CONT: /* restart after signal. */
1641 case PTRACE_KILL:
1642 case PTRACE_SINGLESTEP: /* execute chile for one instruction */
1643 case PTRACE_DETACH: /* detach a process */
1644 ret = sys_ptrace(request, pid, addr, data);
1645 break;
1646
1647 default: 1567 default:
1648 ret = ptrace_request(child, request, addr, data); 1568 return compat_ptrace_request(child, request, caddr, cdata);
1649 break;
1650
1651 } 1569 }
1652 out_tsk:
1653 put_task_struct(child);
1654 out:
1655 unlock_kernel();
1656 return ret; 1570 return ret;
1657} 1571}
1658 1572
@@ -1704,14 +1618,6 @@ out:
1704} 1618}
1705 1619
1706asmlinkage int 1620asmlinkage int
1707sys32_pause (void)
1708{
1709 current->state = TASK_INTERRUPTIBLE;
1710 schedule();
1711 return -ERESTARTNOHAND;
1712}
1713
1714asmlinkage int
1715sys32_msync (unsigned int start, unsigned int len, int flags) 1621sys32_msync (unsigned int start, unsigned int len, int flags)
1716{ 1622{
1717 unsigned int addr; 1623 unsigned int addr;
diff --git a/arch/ia64/include/asm/break.h b/arch/ia64/include/asm/break.h
index f03402039896..e90c40ec9edf 100644
--- a/arch/ia64/include/asm/break.h
+++ b/arch/ia64/include/asm/break.h
@@ -20,4 +20,13 @@
20 */ 20 */
21#define __IA64_BREAK_SYSCALL 0x100000 21#define __IA64_BREAK_SYSCALL 0x100000
22 22
23/*
24 * Xen specific break numbers:
25 */
26#define __IA64_XEN_HYPERCALL 0x1000
27/* [__IA64_XEN_HYPERPRIVOP_START, __IA64_XEN_HYPERPRIVOP_MAX] is used
28 for xen hyperprivops */
29#define __IA64_XEN_HYPERPRIVOP_START 0x1
30#define __IA64_XEN_HYPERPRIVOP_MAX 0x1a
31
23#endif /* _ASM_IA64_BREAK_H */ 32#endif /* _ASM_IA64_BREAK_H */
diff --git a/arch/ia64/include/asm/cacheflush.h b/arch/ia64/include/asm/cacheflush.h
index afcfbda76e20..c8ce2719fee8 100644
--- a/arch/ia64/include/asm/cacheflush.h
+++ b/arch/ia64/include/asm/cacheflush.h
@@ -34,6 +34,8 @@ do { \
34#define flush_dcache_mmap_unlock(mapping) do { } while (0) 34#define flush_dcache_mmap_unlock(mapping) do { } while (0)
35 35
36extern void flush_icache_range (unsigned long start, unsigned long end); 36extern void flush_icache_range (unsigned long start, unsigned long end);
37extern void clflush_cache_range(void *addr, int size);
38
37 39
38#define flush_icache_user_range(vma, page, user_addr, len) \ 40#define flush_icache_user_range(vma, page, user_addr, len) \
39do { \ 41do { \
diff --git a/arch/ia64/include/asm/device.h b/arch/ia64/include/asm/device.h
index 3db6daf7f251..41ab85d66f33 100644
--- a/arch/ia64/include/asm/device.h
+++ b/arch/ia64/include/asm/device.h
@@ -10,6 +10,9 @@ struct dev_archdata {
10#ifdef CONFIG_ACPI 10#ifdef CONFIG_ACPI
11 void *acpi_handle; 11 void *acpi_handle;
12#endif 12#endif
13#ifdef CONFIG_DMAR
14 void *iommu; /* hook for IOMMU specific extension */
15#endif
13}; 16};
14 17
15#endif /* _ASM_IA64_DEVICE_H */ 18#endif /* _ASM_IA64_DEVICE_H */
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index 06ff1ba21465..bbab7e2b0fc9 100644
--- a/arch/ia64/include/asm/dma-mapping.h
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -7,6 +7,49 @@
7 */ 7 */
8#include <asm/machvec.h> 8#include <asm/machvec.h>
9#include <linux/scatterlist.h> 9#include <linux/scatterlist.h>
10#include <asm/swiotlb.h>
11
12struct dma_mapping_ops {
13 int (*mapping_error)(struct device *dev,
14 dma_addr_t dma_addr);
15 void* (*alloc_coherent)(struct device *dev, size_t size,
16 dma_addr_t *dma_handle, gfp_t gfp);
17 void (*free_coherent)(struct device *dev, size_t size,
18 void *vaddr, dma_addr_t dma_handle);
19 dma_addr_t (*map_single)(struct device *hwdev, unsigned long ptr,
20 size_t size, int direction);
21 void (*unmap_single)(struct device *dev, dma_addr_t addr,
22 size_t size, int direction);
23 void (*sync_single_for_cpu)(struct device *hwdev,
24 dma_addr_t dma_handle, size_t size,
25 int direction);
26 void (*sync_single_for_device)(struct device *hwdev,
27 dma_addr_t dma_handle, size_t size,
28 int direction);
29 void (*sync_single_range_for_cpu)(struct device *hwdev,
30 dma_addr_t dma_handle, unsigned long offset,
31 size_t size, int direction);
32 void (*sync_single_range_for_device)(struct device *hwdev,
33 dma_addr_t dma_handle, unsigned long offset,
34 size_t size, int direction);
35 void (*sync_sg_for_cpu)(struct device *hwdev,
36 struct scatterlist *sg, int nelems,
37 int direction);
38 void (*sync_sg_for_device)(struct device *hwdev,
39 struct scatterlist *sg, int nelems,
40 int direction);
41 int (*map_sg)(struct device *hwdev, struct scatterlist *sg,
42 int nents, int direction);
43 void (*unmap_sg)(struct device *hwdev,
44 struct scatterlist *sg, int nents,
45 int direction);
46 int (*dma_supported_op)(struct device *hwdev, u64 mask);
47 int is_phys;
48};
49
50extern struct dma_mapping_ops *dma_ops;
51extern struct ia64_machine_vector ia64_mv;
52extern void set_iommu_machvec(void);
10 53
11#define dma_alloc_coherent(dev, size, handle, gfp) \ 54#define dma_alloc_coherent(dev, size, handle, gfp) \
12 platform_dma_alloc_coherent(dev, size, handle, (gfp) | GFP_DMA) 55 platform_dma_alloc_coherent(dev, size, handle, (gfp) | GFP_DMA)
@@ -96,4 +139,11 @@ dma_cache_sync (struct device *dev, void *vaddr, size_t size,
96 139
97#define dma_is_consistent(d, h) (1) /* all we do is coherent memory... */ 140#define dma_is_consistent(d, h) (1) /* all we do is coherent memory... */
98 141
142static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
143{
144 return dma_ops;
145}
146
147
148
99#endif /* _ASM_IA64_DMA_MAPPING_H */ 149#endif /* _ASM_IA64_DMA_MAPPING_H */
diff --git a/arch/ia64/include/asm/iommu.h b/arch/ia64/include/asm/iommu.h
new file mode 100644
index 000000000000..5fb2bb93de3b
--- /dev/null
+++ b/arch/ia64/include/asm/iommu.h
@@ -0,0 +1,16 @@
1#ifndef _ASM_IA64_IOMMU_H
2#define _ASM_IA64_IOMMU_H 1
3
4#define cpu_has_x2apic 0
5/* 10 seconds */
6#define DMAR_OPERATION_TIMEOUT (((cycles_t) local_cpu_data->itc_freq)*10)
7
8extern void pci_iommu_shutdown(void);
9extern void no_iommu_init(void);
10extern int force_iommu, no_iommu;
11extern int iommu_detected;
12extern void iommu_dma_init(void);
13extern void machvec_init(const char *name);
14extern int forbid_dac;
15
16#endif
diff --git a/arch/ia64/include/asm/kregs.h b/arch/ia64/include/asm/kregs.h
index aefcdfee7f23..39e65f6639f5 100644
--- a/arch/ia64/include/asm/kregs.h
+++ b/arch/ia64/include/asm/kregs.h
@@ -32,7 +32,7 @@
32#define IA64_TR_CURRENT_STACK 1 /* dtr1: maps kernel's memory- & register-stacks */ 32#define IA64_TR_CURRENT_STACK 1 /* dtr1: maps kernel's memory- & register-stacks */
33 33
34#define IA64_TR_ALLOC_BASE 2 /* itr&dtr: Base of dynamic TR resource*/ 34#define IA64_TR_ALLOC_BASE 2 /* itr&dtr: Base of dynamic TR resource*/
35#define IA64_TR_ALLOC_MAX 32 /* Max number for dynamic use*/ 35#define IA64_TR_ALLOC_MAX 64 /* Max number for dynamic use*/
36 36
37/* Processor status register bits: */ 37/* Processor status register bits: */
38#define IA64_PSR_BE_BIT 1 38#define IA64_PSR_BE_BIT 1
diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h
index 2b850ccafef5..1ea28bcee33b 100644
--- a/arch/ia64/include/asm/machvec.h
+++ b/arch/ia64/include/asm/machvec.h
@@ -120,6 +120,8 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *);
120# include <asm/machvec_hpsim.h> 120# include <asm/machvec_hpsim.h>
121# elif defined (CONFIG_IA64_DIG) 121# elif defined (CONFIG_IA64_DIG)
122# include <asm/machvec_dig.h> 122# include <asm/machvec_dig.h>
123# elif defined(CONFIG_IA64_DIG_VTD)
124# include <asm/machvec_dig_vtd.h>
123# elif defined (CONFIG_IA64_HP_ZX1) 125# elif defined (CONFIG_IA64_HP_ZX1)
124# include <asm/machvec_hpzx1.h> 126# include <asm/machvec_hpzx1.h>
125# elif defined (CONFIG_IA64_HP_ZX1_SWIOTLB) 127# elif defined (CONFIG_IA64_HP_ZX1_SWIOTLB)
@@ -128,6 +130,8 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *);
128# include <asm/machvec_sn2.h> 130# include <asm/machvec_sn2.h>
129# elif defined (CONFIG_IA64_SGI_UV) 131# elif defined (CONFIG_IA64_SGI_UV)
130# include <asm/machvec_uv.h> 132# include <asm/machvec_uv.h>
133# elif defined (CONFIG_IA64_XEN_GUEST)
134# include <asm/machvec_xen.h>
131# elif defined (CONFIG_IA64_GENERIC) 135# elif defined (CONFIG_IA64_GENERIC)
132 136
133# ifdef MACHVEC_PLATFORM_HEADER 137# ifdef MACHVEC_PLATFORM_HEADER
diff --git a/arch/ia64/include/asm/machvec_dig_vtd.h b/arch/ia64/include/asm/machvec_dig_vtd.h
new file mode 100644
index 000000000000..3400b561e711
--- /dev/null
+++ b/arch/ia64/include/asm/machvec_dig_vtd.h
@@ -0,0 +1,38 @@
1#ifndef _ASM_IA64_MACHVEC_DIG_VTD_h
2#define _ASM_IA64_MACHVEC_DIG_VTD_h
3
4extern ia64_mv_setup_t dig_setup;
5extern ia64_mv_dma_alloc_coherent vtd_alloc_coherent;
6extern ia64_mv_dma_free_coherent vtd_free_coherent;
7extern ia64_mv_dma_map_single_attrs vtd_map_single_attrs;
8extern ia64_mv_dma_unmap_single_attrs vtd_unmap_single_attrs;
9extern ia64_mv_dma_map_sg_attrs vtd_map_sg_attrs;
10extern ia64_mv_dma_unmap_sg_attrs vtd_unmap_sg_attrs;
11extern ia64_mv_dma_supported iommu_dma_supported;
12extern ia64_mv_dma_mapping_error vtd_dma_mapping_error;
13extern ia64_mv_dma_init pci_iommu_alloc;
14
15/*
16 * This stuff has dual use!
17 *
18 * For a generic kernel, the macros are used to initialize the
19 * platform's machvec structure. When compiling a non-generic kernel,
20 * the macros are used directly.
21 */
22#define platform_name "dig_vtd"
23#define platform_setup dig_setup
24#define platform_dma_init pci_iommu_alloc
25#define platform_dma_alloc_coherent vtd_alloc_coherent
26#define platform_dma_free_coherent vtd_free_coherent
27#define platform_dma_map_single_attrs vtd_map_single_attrs
28#define platform_dma_unmap_single_attrs vtd_unmap_single_attrs
29#define platform_dma_map_sg_attrs vtd_map_sg_attrs
30#define platform_dma_unmap_sg_attrs vtd_unmap_sg_attrs
31#define platform_dma_sync_single_for_cpu machvec_dma_sync_single
32#define platform_dma_sync_sg_for_cpu machvec_dma_sync_sg
33#define platform_dma_sync_single_for_device machvec_dma_sync_single
34#define platform_dma_sync_sg_for_device machvec_dma_sync_sg
35#define platform_dma_supported iommu_dma_supported
36#define platform_dma_mapping_error vtd_dma_mapping_error
37
38#endif /* _ASM_IA64_MACHVEC_DIG_VTD_h */
diff --git a/arch/ia64/include/asm/machvec_init.h b/arch/ia64/include/asm/machvec_init.h
index 7f21249fba3f..ef964b286842 100644
--- a/arch/ia64/include/asm/machvec_init.h
+++ b/arch/ia64/include/asm/machvec_init.h
@@ -1,3 +1,4 @@
1#include <asm/iommu.h>
1#include <asm/machvec.h> 2#include <asm/machvec.h>
2 3
3extern ia64_mv_send_ipi_t ia64_send_ipi; 4extern ia64_mv_send_ipi_t ia64_send_ipi;
diff --git a/arch/ia64/include/asm/machvec_xen.h b/arch/ia64/include/asm/machvec_xen.h
new file mode 100644
index 000000000000..55f9228056cd
--- /dev/null
+++ b/arch/ia64/include/asm/machvec_xen.h
@@ -0,0 +1,22 @@
1#ifndef _ASM_IA64_MACHVEC_XEN_h
2#define _ASM_IA64_MACHVEC_XEN_h
3
4extern ia64_mv_setup_t dig_setup;
5extern ia64_mv_cpu_init_t xen_cpu_init;
6extern ia64_mv_irq_init_t xen_irq_init;
7extern ia64_mv_send_ipi_t xen_platform_send_ipi;
8
9/*
10 * This stuff has dual use!
11 *
12 * For a generic kernel, the macros are used to initialize the
13 * platform's machvec structure. When compiling a non-generic kernel,
14 * the macros are used directly.
15 */
16#define platform_name "xen"
17#define platform_setup dig_setup
18#define platform_cpu_init xen_cpu_init
19#define platform_irq_init xen_irq_init
20#define platform_send_ipi xen_platform_send_ipi
21
22#endif /* _ASM_IA64_MACHVEC_XEN_h */
diff --git a/arch/ia64/include/asm/meminit.h b/arch/ia64/include/asm/meminit.h
index 7245a5781594..6bc96ee54327 100644
--- a/arch/ia64/include/asm/meminit.h
+++ b/arch/ia64/include/asm/meminit.h
@@ -18,10 +18,11 @@
18 * - crash dumping code reserved region 18 * - crash dumping code reserved region
19 * - Kernel memory map built from EFI memory map 19 * - Kernel memory map built from EFI memory map
20 * - ELF core header 20 * - ELF core header
21 * - xen start info if CONFIG_XEN
21 * 22 *
22 * More could be added if necessary 23 * More could be added if necessary
23 */ 24 */
24#define IA64_MAX_RSVD_REGIONS 8 25#define IA64_MAX_RSVD_REGIONS 9
25 26
26struct rsvd_region { 27struct rsvd_region {
27 unsigned long start; /* virtual address of beginning of element */ 28 unsigned long start; /* virtual address of beginning of element */
diff --git a/arch/ia64/include/asm/native/inst.h b/arch/ia64/include/asm/native/inst.h
index c8efbf7b849e..0a1026cca4fa 100644
--- a/arch/ia64/include/asm/native/inst.h
+++ b/arch/ia64/include/asm/native/inst.h
@@ -36,8 +36,13 @@
36 ;; \ 36 ;; \
37 movl clob = PARAVIRT_POISON; \ 37 movl clob = PARAVIRT_POISON; \
38 ;; 38 ;;
39# define CLOBBER_PRED(pred_clob) \
40 ;; \
41 cmp.eq pred_clob, p0 = r0, r0 \
42 ;;
39#else 43#else
40# define CLOBBER(clob) /* nothing */ 44# define CLOBBER(clob) /* nothing */
45# define CLOBBER_PRED(pred_clob) /* nothing */
41#endif 46#endif
42 47
43#define MOV_FROM_IFA(reg) \ 48#define MOV_FROM_IFA(reg) \
@@ -136,7 +141,8 @@
136 141
137#define SSM_PSR_I(pred, pred_clob, clob) \ 142#define SSM_PSR_I(pred, pred_clob, clob) \
138(pred) ssm psr.i \ 143(pred) ssm psr.i \
139 CLOBBER(clob) 144 CLOBBER(clob) \
145 CLOBBER_PRED(pred_clob)
140 146
141#define RSM_PSR_I(pred, clob0, clob1) \ 147#define RSM_PSR_I(pred, clob0, clob1) \
142(pred) rsm psr.i \ 148(pred) rsm psr.i \
diff --git a/arch/ia64/include/asm/native/pvchk_inst.h b/arch/ia64/include/asm/native/pvchk_inst.h
new file mode 100644
index 000000000000..b8e6eb1090d7
--- /dev/null
+++ b/arch/ia64/include/asm/native/pvchk_inst.h
@@ -0,0 +1,263 @@
1#ifndef _ASM_NATIVE_PVCHK_INST_H
2#define _ASM_NATIVE_PVCHK_INST_H
3
4/******************************************************************************
5 * arch/ia64/include/asm/native/pvchk_inst.h
6 * Checker for paravirtualizations of privileged operations.
7 *
8 * Copyright (C) 2005 Hewlett-Packard Co
9 * Dan Magenheimer <dan.magenheimer@hp.com>
10 *
11 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
12 * VA Linux Systems Japan K.K.
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2 of the License, or
17 * (at your option) any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
27 *
28 */
29
30/**********************************************
31 * Instructions paravirtualized for correctness
32 **********************************************/
33
34/* "fc" and "thash" are privilege-sensitive instructions, meaning they
35 * may have different semantics depending on whether they are executed
36 * at PL0 vs PL!=0. When paravirtualized, these instructions mustn't
37 * be allowed to execute directly, lest incorrect semantics result.
38 */
39
40#define fc .error "fc should not be used directly."
41#define thash .error "thash should not be used directly."
42
43/* Note that "ttag" and "cover" are also privilege-sensitive; "ttag"
44 * is not currently used (though it may be in a long-format VHPT system!)
45 * and the semantics of cover only change if psr.ic is off which is very
46 * rare (and currently non-existent outside of assembly code
47 */
48#define ttag .error "ttag should not be used directly."
49#define cover .error "cover should not be used directly."
50
51/* There are also privilege-sensitive registers. These registers are
52 * readable at any privilege level but only writable at PL0.
53 */
54#define cpuid .error "cpuid should not be used directly."
55#define pmd .error "pmd should not be used directly."
56
57/*
58 * mov ar.eflag =
59 * mov = ar.eflag
60 */
61
62/**********************************************
63 * Instructions paravirtualized for performance
64 **********************************************/
65/*
66 * Those instructions include '.' which can't be handled by cpp.
67 * or can't be handled by cpp easily.
68 * They are handled by sed instead of cpp.
69 */
70
71/* for .S
72 * itc.i
73 * itc.d
74 *
75 * bsw.0
76 * bsw.1
77 *
78 * ssm psr.ic | PSR_DEFAULT_BITS
79 * ssm psr.ic
80 * rsm psr.ic
81 * ssm psr.i
82 * rsm psr.i
83 * rsm psr.i | psr.ic
84 * rsm psr.dt
85 * ssm psr.dt
86 *
87 * mov = cr.ifa
88 * mov = cr.itir
89 * mov = cr.isr
90 * mov = cr.iha
91 * mov = cr.ipsr
92 * mov = cr.iim
93 * mov = cr.iip
94 * mov = cr.ivr
95 * mov = psr
96 *
97 * mov cr.ifa =
98 * mov cr.itir =
99 * mov cr.iha =
100 * mov cr.ipsr =
101 * mov cr.ifs =
102 * mov cr.iip =
103 * mov cr.kr =
104 */
105
106/* for intrinsics
107 * ssm psr.i
108 * rsm psr.i
109 * mov = psr
110 * mov = ivr
111 * mov = tpr
112 * mov cr.itm =
113 * mov eoi =
114 * mov rr[] =
115 * mov = rr[]
116 * mov = kr
117 * mov kr =
118 * ptc.ga
119 */
120
121/*************************************************************
122 * define paravirtualized instrcution macros as nop to ingore.
123 * and check whether arguments are appropriate.
124 *************************************************************/
125
126/* check whether reg is a regular register */
127.macro is_rreg_in reg
128 .ifc "\reg", "r0"
129 nop 0
130 .exitm
131 .endif
132 ;;
133 mov \reg = r0
134 ;;
135.endm
136#define IS_RREG_IN(reg) is_rreg_in reg ;
137
138#define IS_RREG_OUT(reg) \
139 ;; \
140 mov reg = r0 \
141 ;;
142
143#define IS_RREG_CLOB(reg) IS_RREG_OUT(reg)
144
145/* check whether pred is a predicate register */
146#define IS_PRED_IN(pred) \
147 ;; \
148 (pred) nop 0 \
149 ;;
150
151#define IS_PRED_OUT(pred) \
152 ;; \
153 cmp.eq pred, p0 = r0, r0 \
154 ;;
155
156#define IS_PRED_CLOB(pred) IS_PRED_OUT(pred)
157
158
159#define DO_SAVE_MIN(__COVER, SAVE_IFS, EXTRA, WORKAROUND) \
160 nop 0
161#define MOV_FROM_IFA(reg) \
162 IS_RREG_OUT(reg)
163#define MOV_FROM_ITIR(reg) \
164 IS_RREG_OUT(reg)
165#define MOV_FROM_ISR(reg) \
166 IS_RREG_OUT(reg)
167#define MOV_FROM_IHA(reg) \
168 IS_RREG_OUT(reg)
169#define MOV_FROM_IPSR(pred, reg) \
170 IS_PRED_IN(pred) \
171 IS_RREG_OUT(reg)
172#define MOV_FROM_IIM(reg) \
173 IS_RREG_OUT(reg)
174#define MOV_FROM_IIP(reg) \
175 IS_RREG_OUT(reg)
176#define MOV_FROM_IVR(reg, clob) \
177 IS_RREG_OUT(reg) \
178 IS_RREG_CLOB(clob)
179#define MOV_FROM_PSR(pred, reg, clob) \
180 IS_PRED_IN(pred) \
181 IS_RREG_OUT(reg) \
182 IS_RREG_CLOB(clob)
183#define MOV_TO_IFA(reg, clob) \
184 IS_RREG_IN(reg) \
185 IS_RREG_CLOB(clob)
186#define MOV_TO_ITIR(pred, reg, clob) \
187 IS_PRED_IN(pred) \
188 IS_RREG_IN(reg) \
189 IS_RREG_CLOB(clob)
190#define MOV_TO_IHA(pred, reg, clob) \
191 IS_PRED_IN(pred) \
192 IS_RREG_IN(reg) \
193 IS_RREG_CLOB(clob)
194#define MOV_TO_IPSR(pred, reg, clob) \
195 IS_PRED_IN(pred) \
196 IS_RREG_IN(reg) \
197 IS_RREG_CLOB(clob)
198#define MOV_TO_IFS(pred, reg, clob) \
199 IS_PRED_IN(pred) \
200 IS_RREG_IN(reg) \
201 IS_RREG_CLOB(clob)
202#define MOV_TO_IIP(reg, clob) \
203 IS_RREG_IN(reg) \
204 IS_RREG_CLOB(clob)
205#define MOV_TO_KR(kr, reg, clob0, clob1) \
206 IS_RREG_IN(reg) \
207 IS_RREG_CLOB(clob0) \
208 IS_RREG_CLOB(clob1)
209#define ITC_I(pred, reg, clob) \
210 IS_PRED_IN(pred) \
211 IS_RREG_IN(reg) \
212 IS_RREG_CLOB(clob)
213#define ITC_D(pred, reg, clob) \
214 IS_PRED_IN(pred) \
215 IS_RREG_IN(reg) \
216 IS_RREG_CLOB(clob)
217#define ITC_I_AND_D(pred_i, pred_d, reg, clob) \
218 IS_PRED_IN(pred_i) \
219 IS_PRED_IN(pred_d) \
220 IS_RREG_IN(reg) \
221 IS_RREG_CLOB(clob)
222#define THASH(pred, reg0, reg1, clob) \
223 IS_PRED_IN(pred) \
224 IS_RREG_OUT(reg0) \
225 IS_RREG_IN(reg1) \
226 IS_RREG_CLOB(clob)
227#define SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(clob0, clob1) \
228 IS_RREG_CLOB(clob0) \
229 IS_RREG_CLOB(clob1)
230#define SSM_PSR_IC_AND_SRLZ_D(clob0, clob1) \
231 IS_RREG_CLOB(clob0) \
232 IS_RREG_CLOB(clob1)
233#define RSM_PSR_IC(clob) \
234 IS_RREG_CLOB(clob)
235#define SSM_PSR_I(pred, pred_clob, clob) \
236 IS_PRED_IN(pred) \
237 IS_PRED_CLOB(pred_clob) \
238 IS_RREG_CLOB(clob)
239#define RSM_PSR_I(pred, clob0, clob1) \
240 IS_PRED_IN(pred) \
241 IS_RREG_CLOB(clob0) \
242 IS_RREG_CLOB(clob1)
243#define RSM_PSR_I_IC(clob0, clob1, clob2) \
244 IS_RREG_CLOB(clob0) \
245 IS_RREG_CLOB(clob1) \
246 IS_RREG_CLOB(clob2)
247#define RSM_PSR_DT \
248 nop 0
249#define SSM_PSR_DT_AND_SRLZ_I \
250 nop 0
251#define BSW_0(clob0, clob1, clob2) \
252 IS_RREG_CLOB(clob0) \
253 IS_RREG_CLOB(clob1) \
254 IS_RREG_CLOB(clob2)
255#define BSW_1(clob0, clob1) \
256 IS_RREG_CLOB(clob0) \
257 IS_RREG_CLOB(clob1)
258#define COVER \
259 nop 0
260#define RFI \
261 br.ret.sptk.many rp /* defining nop causes dependency error */
262
263#endif /* _ASM_NATIVE_PVCHK_INST_H */
diff --git a/arch/ia64/include/asm/paravirt.h b/arch/ia64/include/asm/paravirt.h
index 660cab044834..2bf3636473fe 100644
--- a/arch/ia64/include/asm/paravirt.h
+++ b/arch/ia64/include/asm/paravirt.h
@@ -117,7 +117,7 @@ static inline void paravirt_post_smp_prepare_boot_cpu(void)
117struct pv_iosapic_ops { 117struct pv_iosapic_ops {
118 void (*pcat_compat_init)(void); 118 void (*pcat_compat_init)(void);
119 119
120 struct irq_chip *(*get_irq_chip)(unsigned long trigger); 120 struct irq_chip *(*__get_irq_chip)(unsigned long trigger);
121 121
122 unsigned int (*__read)(char __iomem *iosapic, unsigned int reg); 122 unsigned int (*__read)(char __iomem *iosapic, unsigned int reg);
123 void (*__write)(char __iomem *iosapic, unsigned int reg, u32 val); 123 void (*__write)(char __iomem *iosapic, unsigned int reg, u32 val);
@@ -135,7 +135,7 @@ iosapic_pcat_compat_init(void)
135static inline struct irq_chip* 135static inline struct irq_chip*
136iosapic_get_irq_chip(unsigned long trigger) 136iosapic_get_irq_chip(unsigned long trigger)
137{ 137{
138 return pv_iosapic_ops.get_irq_chip(trigger); 138 return pv_iosapic_ops.__get_irq_chip(trigger);
139} 139}
140 140
141static inline unsigned int 141static inline unsigned int
diff --git a/arch/ia64/include/asm/pci.h b/arch/ia64/include/asm/pci.h
index ce342fb74246..1d660d89db0d 100644
--- a/arch/ia64/include/asm/pci.h
+++ b/arch/ia64/include/asm/pci.h
@@ -156,4 +156,7 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
156 return channel ? isa_irq_to_vector(15) : isa_irq_to_vector(14); 156 return channel ? isa_irq_to_vector(15) : isa_irq_to_vector(14);
157} 157}
158 158
159#ifdef CONFIG_DMAR
160extern void pci_iommu_alloc(void);
161#endif
159#endif /* _ASM_IA64_PCI_H */ 162#endif /* _ASM_IA64_PCI_H */
diff --git a/arch/ia64/include/asm/ptrace.h b/arch/ia64/include/asm/ptrace.h
index 15f8dcfe6eee..6417c1ecb44e 100644
--- a/arch/ia64/include/asm/ptrace.h
+++ b/arch/ia64/include/asm/ptrace.h
@@ -240,6 +240,12 @@ struct switch_stack {
240 */ 240 */
241# define instruction_pointer(regs) ((regs)->cr_iip + ia64_psr(regs)->ri) 241# define instruction_pointer(regs) ((regs)->cr_iip + ia64_psr(regs)->ri)
242 242
243static inline unsigned long user_stack_pointer(struct pt_regs *regs)
244{
245 /* FIXME: should this be bspstore + nr_dirty regs? */
246 return regs->ar_bspstore;
247}
248
243#define regs_return_value(regs) ((regs)->r8) 249#define regs_return_value(regs) ((regs)->r8)
244 250
245/* Conserve space in histogram by encoding slot bits in address 251/* Conserve space in histogram by encoding slot bits in address
@@ -319,6 +325,8 @@ struct switch_stack {
319 #define arch_has_block_step() (1) 325 #define arch_has_block_step() (1)
320 extern void user_enable_block_step(struct task_struct *); 326 extern void user_enable_block_step(struct task_struct *);
321 327
328#define __ARCH_WANT_COMPAT_SYS_PTRACE
329
322#endif /* !__KERNEL__ */ 330#endif /* !__KERNEL__ */
323 331
324/* pt_all_user_regs is used for PTRACE_GETREGS PTRACE_SETREGS */ 332/* pt_all_user_regs is used for PTRACE_GETREGS PTRACE_SETREGS */
diff --git a/arch/ia64/include/asm/pvclock-abi.h b/arch/ia64/include/asm/pvclock-abi.h
new file mode 100644
index 000000000000..44ef9ef8f5b3
--- /dev/null
+++ b/arch/ia64/include/asm/pvclock-abi.h
@@ -0,0 +1,48 @@
1/*
2 * same structure to x86's
3 * Hopefully asm-x86/pvclock-abi.h would be moved to somewhere more generic.
4 * For now, define same duplicated definitions.
5 */
6
7#ifndef _ASM_IA64__PVCLOCK_ABI_H
8#define _ASM_IA64__PVCLOCK_ABI_H
9#ifndef __ASSEMBLY__
10
11/*
12 * These structs MUST NOT be changed.
13 * They are the ABI between hypervisor and guest OS.
14 * Both Xen and KVM are using this.
15 *
16 * pvclock_vcpu_time_info holds the system time and the tsc timestamp
17 * of the last update. So the guest can use the tsc delta to get a
18 * more precise system time. There is one per virtual cpu.
19 *
20 * pvclock_wall_clock references the point in time when the system
21 * time was zero (usually boot time), thus the guest calculates the
22 * current wall clock by adding the system time.
23 *
24 * Protocol for the "version" fields is: hypervisor raises it (making
25 * it uneven) before it starts updating the fields and raises it again
26 * (making it even) when it is done. Thus the guest can make sure the
27 * time values it got are consistent by checking the version before
28 * and after reading them.
29 */
30
31struct pvclock_vcpu_time_info {
32 u32 version;
33 u32 pad0;
34 u64 tsc_timestamp;
35 u64 system_time;
36 u32 tsc_to_system_mul;
37 s8 tsc_shift;
38 u8 pad[3];
39} __attribute__((__packed__)); /* 32 bytes */
40
41struct pvclock_wall_clock {
42 u32 version;
43 u32 sec;
44 u32 nsec;
45} __attribute__((__packed__));
46
47#endif /* __ASSEMBLY__ */
48#endif /* _ASM_IA64__PVCLOCK_ABI_H */
diff --git a/arch/ia64/include/asm/swiotlb.h b/arch/ia64/include/asm/swiotlb.h
new file mode 100644
index 000000000000..fb79423834d0
--- /dev/null
+++ b/arch/ia64/include/asm/swiotlb.h
@@ -0,0 +1,56 @@
1#ifndef ASM_IA64__SWIOTLB_H
2#define ASM_IA64__SWIOTLB_H
3
4#include <linux/dma-mapping.h>
5
6/* SWIOTLB interface */
7
8extern dma_addr_t swiotlb_map_single(struct device *hwdev, void *ptr,
9 size_t size, int dir);
10extern void *swiotlb_alloc_coherent(struct device *hwdev, size_t size,
11 dma_addr_t *dma_handle, gfp_t flags);
12extern void swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
13 size_t size, int dir);
14extern void swiotlb_sync_single_for_cpu(struct device *hwdev,
15 dma_addr_t dev_addr,
16 size_t size, int dir);
17extern void swiotlb_sync_single_for_device(struct device *hwdev,
18 dma_addr_t dev_addr,
19 size_t size, int dir);
20extern void swiotlb_sync_single_range_for_cpu(struct device *hwdev,
21 dma_addr_t dev_addr,
22 unsigned long offset,
23 size_t size, int dir);
24extern void swiotlb_sync_single_range_for_device(struct device *hwdev,
25 dma_addr_t dev_addr,
26 unsigned long offset,
27 size_t size, int dir);
28extern void swiotlb_sync_sg_for_cpu(struct device *hwdev,
29 struct scatterlist *sg, int nelems,
30 int dir);
31extern void swiotlb_sync_sg_for_device(struct device *hwdev,
32 struct scatterlist *sg, int nelems,
33 int dir);
34extern int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg,
35 int nents, int direction);
36extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg,
37 int nents, int direction);
38extern int swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr);
39extern void swiotlb_free_coherent(struct device *hwdev, size_t size,
40 void *vaddr, dma_addr_t dma_handle);
41extern int swiotlb_dma_supported(struct device *hwdev, u64 mask);
42extern void swiotlb_init(void);
43
44extern int swiotlb_force;
45
46#ifdef CONFIG_SWIOTLB
47extern int swiotlb;
48extern void pci_swiotlb_init(void);
49#else
50#define swiotlb 0
51static inline void pci_swiotlb_init(void)
52{
53}
54#endif
55
56#endif /* ASM_IA64__SWIOTLB_H */
diff --git a/arch/ia64/include/asm/sync_bitops.h b/arch/ia64/include/asm/sync_bitops.h
new file mode 100644
index 000000000000..593c12eeb270
--- /dev/null
+++ b/arch/ia64/include/asm/sync_bitops.h
@@ -0,0 +1,51 @@
1#ifndef _ASM_IA64_SYNC_BITOPS_H
2#define _ASM_IA64_SYNC_BITOPS_H
3
4/*
5 * Copyright (C) 2008 Isaku Yamahata <yamahata at valinux co jp>
6 *
7 * Based on synch_bitops.h which Dan Magenhaimer wrote.
8 *
9 * bit operations which provide guaranteed strong synchronisation
10 * when communicating with Xen or other guest OSes running on other CPUs.
11 */
12
13static inline void sync_set_bit(int nr, volatile void *addr)
14{
15 set_bit(nr, addr);
16}
17
18static inline void sync_clear_bit(int nr, volatile void *addr)
19{
20 clear_bit(nr, addr);
21}
22
23static inline void sync_change_bit(int nr, volatile void *addr)
24{
25 change_bit(nr, addr);
26}
27
28static inline int sync_test_and_set_bit(int nr, volatile void *addr)
29{
30 return test_and_set_bit(nr, addr);
31}
32
33static inline int sync_test_and_clear_bit(int nr, volatile void *addr)
34{
35 return test_and_clear_bit(nr, addr);
36}
37
38static inline int sync_test_and_change_bit(int nr, volatile void *addr)
39{
40 return test_and_change_bit(nr, addr);
41}
42
43static inline int sync_test_bit(int nr, const volatile void *addr)
44{
45 return test_bit(nr, addr);
46}
47
48#define sync_cmpxchg(ptr, old, new) \
49 ((__typeof__(*(ptr)))cmpxchg_acq((ptr), (old), (new)))
50
51#endif /* _ASM_IA64_SYNC_BITOPS_H */
diff --git a/arch/ia64/include/asm/syscall.h b/arch/ia64/include/asm/syscall.h
new file mode 100644
index 000000000000..2f758a42f94b
--- /dev/null
+++ b/arch/ia64/include/asm/syscall.h
@@ -0,0 +1,163 @@
1/*
2 * Access to user system call parameters and results
3 *
4 * Copyright (C) 2008 Intel Corp. Shaohua Li <shaohua.li@intel.com>
5 *
6 * This copyrighted material is made available to anyone wishing to use,
7 * modify, copy, or redistribute it subject to the terms and conditions
8 * of the GNU General Public License v.2.
9 *
10 * See asm-generic/syscall.h for descriptions of what we must do here.
11 */
12
13#ifndef _ASM_SYSCALL_H
14#define _ASM_SYSCALL_H 1
15
16#include <linux/sched.h>
17#include <linux/err.h>
18
19static inline long syscall_get_nr(struct task_struct *task,
20 struct pt_regs *regs)
21{
22 if ((long)regs->cr_ifs < 0) /* Not a syscall */
23 return -1;
24
25#ifdef CONFIG_IA32_SUPPORT
26 if (IS_IA32_PROCESS(regs))
27 return regs->r1;
28#endif
29
30 return regs->r15;
31}
32
33static inline void syscall_rollback(struct task_struct *task,
34 struct pt_regs *regs)
35{
36#ifdef CONFIG_IA32_SUPPORT
37 if (IS_IA32_PROCESS(regs))
38 regs->r8 = regs->r1;
39#endif
40
41 /* do nothing */
42}
43
44static inline long syscall_get_error(struct task_struct *task,
45 struct pt_regs *regs)
46{
47#ifdef CONFIG_IA32_SUPPORT
48 if (IS_IA32_PROCESS(regs))
49 return regs->r8;
50#endif
51
52 return regs->r10 == -1 ? regs->r8:0;
53}
54
55static inline long syscall_get_return_value(struct task_struct *task,
56 struct pt_regs *regs)
57{
58 return regs->r8;
59}
60
61static inline void syscall_set_return_value(struct task_struct *task,
62 struct pt_regs *regs,
63 int error, long val)
64{
65#ifdef CONFIG_IA32_SUPPORT
66 if (IS_IA32_PROCESS(regs)) {
67 regs->r8 = (long) error ? error : val;
68 return;
69 }
70#endif
71
72 if (error) {
73 /* error < 0, but ia64 uses > 0 return value */
74 regs->r8 = -error;
75 regs->r10 = -1;
76 } else {
77 regs->r8 = val;
78 regs->r10 = 0;
79 }
80}
81
82extern void ia64_syscall_get_set_arguments(struct task_struct *task,
83 struct pt_regs *regs, unsigned int i, unsigned int n,
84 unsigned long *args, int rw);
85static inline void syscall_get_arguments(struct task_struct *task,
86 struct pt_regs *regs,
87 unsigned int i, unsigned int n,
88 unsigned long *args)
89{
90 BUG_ON(i + n > 6);
91
92#ifdef CONFIG_IA32_SUPPORT
93 if (IS_IA32_PROCESS(regs)) {
94 switch (i + n) {
95 case 6:
96 if (!n--) break;
97 *args++ = regs->r13;
98 case 5:
99 if (!n--) break;
100 *args++ = regs->r15;
101 case 4:
102 if (!n--) break;
103 *args++ = regs->r14;
104 case 3:
105 if (!n--) break;
106 *args++ = regs->r10;
107 case 2:
108 if (!n--) break;
109 *args++ = regs->r9;
110 case 1:
111 if (!n--) break;
112 *args++ = regs->r11;
113 case 0:
114 if (!n--) break;
115 default:
116 BUG();
117 break;
118 }
119
120 return;
121 }
122#endif
123 ia64_syscall_get_set_arguments(task, regs, i, n, args, 0);
124}
125
126static inline void syscall_set_arguments(struct task_struct *task,
127 struct pt_regs *regs,
128 unsigned int i, unsigned int n,
129 unsigned long *args)
130{
131 BUG_ON(i + n > 6);
132
133#ifdef CONFIG_IA32_SUPPORT
134 if (IS_IA32_PROCESS(regs)) {
135 switch (i + n) {
136 case 6:
137 if (!n--) break;
138 regs->r13 = *args++;
139 case 5:
140 if (!n--) break;
141 regs->r15 = *args++;
142 case 4:
143 if (!n--) break;
144 regs->r14 = *args++;
145 case 3:
146 if (!n--) break;
147 regs->r10 = *args++;
148 case 2:
149 if (!n--) break;
150 regs->r9 = *args++;
151 case 1:
152 if (!n--) break;
153 regs->r11 = *args++;
154 case 0:
155 if (!n--) break;
156 }
157
158 return;
159 }
160#endif
161 ia64_syscall_get_set_arguments(task, regs, i, n, args, 1);
162}
163#endif /* _ASM_SYSCALL_H */
diff --git a/arch/ia64/include/asm/thread_info.h b/arch/ia64/include/asm/thread_info.h
index 7c60fcdd2efd..ae6922626bf4 100644
--- a/arch/ia64/include/asm/thread_info.h
+++ b/arch/ia64/include/asm/thread_info.h
@@ -87,9 +87,6 @@ struct thread_info {
87#define alloc_task_struct() ((struct task_struct *)__get_free_pages(GFP_KERNEL | __GFP_COMP, KERNEL_STACK_SIZE_ORDER)) 87#define alloc_task_struct() ((struct task_struct *)__get_free_pages(GFP_KERNEL | __GFP_COMP, KERNEL_STACK_SIZE_ORDER))
88#define free_task_struct(tsk) free_pages((unsigned long) (tsk), KERNEL_STACK_SIZE_ORDER) 88#define free_task_struct(tsk) free_pages((unsigned long) (tsk), KERNEL_STACK_SIZE_ORDER)
89 89
90#define tsk_set_notify_resume(tsk) \
91 set_ti_thread_flag(task_thread_info(tsk), TIF_NOTIFY_RESUME)
92extern void tsk_clear_notify_resume(struct task_struct *tsk);
93#endif /* !__ASSEMBLY */ 90#endif /* !__ASSEMBLY */
94 91
95/* 92/*
diff --git a/arch/ia64/include/asm/timex.h b/arch/ia64/include/asm/timex.h
index 05a6baf8a472..4e03cfe74a0c 100644
--- a/arch/ia64/include/asm/timex.h
+++ b/arch/ia64/include/asm/timex.h
@@ -39,4 +39,6 @@ get_cycles (void)
39 return ret; 39 return ret;
40} 40}
41 41
42extern void ia64_cpu_local_tick (void);
43
42#endif /* _ASM_IA64_TIMEX_H */ 44#endif /* _ASM_IA64_TIMEX_H */
diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h
index d535833aab5e..f791576355ad 100644
--- a/arch/ia64/include/asm/unistd.h
+++ b/arch/ia64/include/asm/unistd.h
@@ -337,6 +337,7 @@
337# define __ARCH_WANT_SYS_NICE 337# define __ARCH_WANT_SYS_NICE
338# define __ARCH_WANT_SYS_OLD_GETRLIMIT 338# define __ARCH_WANT_SYS_OLD_GETRLIMIT
339# define __ARCH_WANT_SYS_OLDUMOUNT 339# define __ARCH_WANT_SYS_OLDUMOUNT
340# define __ARCH_WANT_SYS_PAUSE
340# define __ARCH_WANT_SYS_SIGPENDING 341# define __ARCH_WANT_SYS_SIGPENDING
341# define __ARCH_WANT_SYS_SIGPROCMASK 342# define __ARCH_WANT_SYS_SIGPROCMASK
342# define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND 343# define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND
diff --git a/arch/ia64/include/asm/xen/events.h b/arch/ia64/include/asm/xen/events.h
new file mode 100644
index 000000000000..73248781fba8
--- /dev/null
+++ b/arch/ia64/include/asm/xen/events.h
@@ -0,0 +1,50 @@
1/******************************************************************************
2 * arch/ia64/include/asm/xen/events.h
3 *
4 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22#ifndef _ASM_IA64_XEN_EVENTS_H
23#define _ASM_IA64_XEN_EVENTS_H
24
25enum ipi_vector {
26 XEN_RESCHEDULE_VECTOR,
27 XEN_IPI_VECTOR,
28 XEN_CMCP_VECTOR,
29 XEN_CPEP_VECTOR,
30
31 XEN_NR_IPIS,
32};
33
34static inline int xen_irqs_disabled(struct pt_regs *regs)
35{
36 return !(ia64_psr(regs)->i);
37}
38
39static inline void xen_do_IRQ(int irq, struct pt_regs *regs)
40{
41 struct pt_regs *old_regs;
42 old_regs = set_irq_regs(regs);
43 irq_enter();
44 __do_IRQ(irq);
45 irq_exit();
46 set_irq_regs(old_regs);
47}
48#define irq_ctx_init(cpu) do { } while (0)
49
50#endif /* _ASM_IA64_XEN_EVENTS_H */
diff --git a/arch/ia64/include/asm/xen/grant_table.h b/arch/ia64/include/asm/xen/grant_table.h
new file mode 100644
index 000000000000..2b1fae0e2d11
--- /dev/null
+++ b/arch/ia64/include/asm/xen/grant_table.h
@@ -0,0 +1,29 @@
1/******************************************************************************
2 * arch/ia64/include/asm/xen/grant_table.h
3 *
4 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#ifndef _ASM_IA64_XEN_GRANT_TABLE_H
24#define _ASM_IA64_XEN_GRANT_TABLE_H
25
26struct vm_struct *xen_alloc_vm_area(unsigned long size);
27void xen_free_vm_area(struct vm_struct *area);
28
29#endif /* _ASM_IA64_XEN_GRANT_TABLE_H */
diff --git a/arch/ia64/include/asm/xen/hypercall.h b/arch/ia64/include/asm/xen/hypercall.h
new file mode 100644
index 000000000000..96fc62366aa4
--- /dev/null
+++ b/arch/ia64/include/asm/xen/hypercall.h
@@ -0,0 +1,265 @@
1/******************************************************************************
2 * hypercall.h
3 *
4 * Linux-specific hypervisor handling.
5 *
6 * Copyright (c) 2002-2004, K A Fraser
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version 2
10 * as published by the Free Software Foundation; or, when distributed
11 * separately from the Linux kernel or incorporated into other
12 * software packages, subject to the following license:
13 *
14 * Permission is hereby granted, free of charge, to any person obtaining a copy
15 * of this source file (the "Software"), to deal in the Software without
16 * restriction, including without limitation the rights to use, copy, modify,
17 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
18 * and to permit persons to whom the Software is furnished to do so, subject to
19 * the following conditions:
20 *
21 * The above copyright notice and this permission notice shall be included in
22 * all copies or substantial portions of the Software.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
25 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
26 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
27 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
28 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
29 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
30 * IN THE SOFTWARE.
31 */
32
33#ifndef _ASM_IA64_XEN_HYPERCALL_H
34#define _ASM_IA64_XEN_HYPERCALL_H
35
36#include <xen/interface/xen.h>
37#include <xen/interface/physdev.h>
38#include <xen/interface/sched.h>
39#include <asm/xen/xcom_hcall.h>
40struct xencomm_handle;
41extern unsigned long __hypercall(unsigned long a1, unsigned long a2,
42 unsigned long a3, unsigned long a4,
43 unsigned long a5, unsigned long cmd);
44
45/*
46 * Assembler stubs for hyper-calls.
47 */
48
49#define _hypercall0(type, name) \
50({ \
51 long __res; \
52 __res = __hypercall(0, 0, 0, 0, 0, __HYPERVISOR_##name);\
53 (type)__res; \
54})
55
56#define _hypercall1(type, name, a1) \
57({ \
58 long __res; \
59 __res = __hypercall((unsigned long)a1, \
60 0, 0, 0, 0, __HYPERVISOR_##name); \
61 (type)__res; \
62})
63
64#define _hypercall2(type, name, a1, a2) \
65({ \
66 long __res; \
67 __res = __hypercall((unsigned long)a1, \
68 (unsigned long)a2, \
69 0, 0, 0, __HYPERVISOR_##name); \
70 (type)__res; \
71})
72
73#define _hypercall3(type, name, a1, a2, a3) \
74({ \
75 long __res; \
76 __res = __hypercall((unsigned long)a1, \
77 (unsigned long)a2, \
78 (unsigned long)a3, \
79 0, 0, __HYPERVISOR_##name); \
80 (type)__res; \
81})
82
83#define _hypercall4(type, name, a1, a2, a3, a4) \
84({ \
85 long __res; \
86 __res = __hypercall((unsigned long)a1, \
87 (unsigned long)a2, \
88 (unsigned long)a3, \
89 (unsigned long)a4, \
90 0, __HYPERVISOR_##name); \
91 (type)__res; \
92})
93
94#define _hypercall5(type, name, a1, a2, a3, a4, a5) \
95({ \
96 long __res; \
97 __res = __hypercall((unsigned long)a1, \
98 (unsigned long)a2, \
99 (unsigned long)a3, \
100 (unsigned long)a4, \
101 (unsigned long)a5, \
102 __HYPERVISOR_##name); \
103 (type)__res; \
104})
105
106
107static inline int
108xencomm_arch_hypercall_sched_op(int cmd, struct xencomm_handle *arg)
109{
110 return _hypercall2(int, sched_op_new, cmd, arg);
111}
112
113static inline long
114HYPERVISOR_set_timer_op(u64 timeout)
115{
116 unsigned long timeout_hi = (unsigned long)(timeout >> 32);
117 unsigned long timeout_lo = (unsigned long)timeout;
118 return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi);
119}
120
121static inline int
122xencomm_arch_hypercall_multicall(struct xencomm_handle *call_list,
123 int nr_calls)
124{
125 return _hypercall2(int, multicall, call_list, nr_calls);
126}
127
128static inline int
129xencomm_arch_hypercall_memory_op(unsigned int cmd, struct xencomm_handle *arg)
130{
131 return _hypercall2(int, memory_op, cmd, arg);
132}
133
134static inline int
135xencomm_arch_hypercall_event_channel_op(int cmd, struct xencomm_handle *arg)
136{
137 return _hypercall2(int, event_channel_op, cmd, arg);
138}
139
140static inline int
141xencomm_arch_hypercall_xen_version(int cmd, struct xencomm_handle *arg)
142{
143 return _hypercall2(int, xen_version, cmd, arg);
144}
145
146static inline int
147xencomm_arch_hypercall_console_io(int cmd, int count,
148 struct xencomm_handle *str)
149{
150 return _hypercall3(int, console_io, cmd, count, str);
151}
152
153static inline int
154xencomm_arch_hypercall_physdev_op(int cmd, struct xencomm_handle *arg)
155{
156 return _hypercall2(int, physdev_op, cmd, arg);
157}
158
159static inline int
160xencomm_arch_hypercall_grant_table_op(unsigned int cmd,
161 struct xencomm_handle *uop,
162 unsigned int count)
163{
164 return _hypercall3(int, grant_table_op, cmd, uop, count);
165}
166
167int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count);
168
169extern int xencomm_arch_hypercall_suspend(struct xencomm_handle *arg);
170
171static inline int
172xencomm_arch_hypercall_callback_op(int cmd, struct xencomm_handle *arg)
173{
174 return _hypercall2(int, callback_op, cmd, arg);
175}
176
177static inline long
178xencomm_arch_hypercall_vcpu_op(int cmd, int cpu, void *arg)
179{
180 return _hypercall3(long, vcpu_op, cmd, cpu, arg);
181}
182
183static inline int
184HYPERVISOR_physdev_op(int cmd, void *arg)
185{
186 switch (cmd) {
187 case PHYSDEVOP_eoi:
188 return _hypercall1(int, ia64_fast_eoi,
189 ((struct physdev_eoi *)arg)->irq);
190 default:
191 return xencomm_hypercall_physdev_op(cmd, arg);
192 }
193}
194
195static inline long
196xencomm_arch_hypercall_opt_feature(struct xencomm_handle *arg)
197{
198 return _hypercall1(long, opt_feature, arg);
199}
200
201/* for balloon driver */
202#define HYPERVISOR_update_va_mapping(va, new_val, flags) (0)
203
204/* Use xencomm to do hypercalls. */
205#define HYPERVISOR_sched_op xencomm_hypercall_sched_op
206#define HYPERVISOR_event_channel_op xencomm_hypercall_event_channel_op
207#define HYPERVISOR_callback_op xencomm_hypercall_callback_op
208#define HYPERVISOR_multicall xencomm_hypercall_multicall
209#define HYPERVISOR_xen_version xencomm_hypercall_xen_version
210#define HYPERVISOR_console_io xencomm_hypercall_console_io
211#define HYPERVISOR_memory_op xencomm_hypercall_memory_op
212#define HYPERVISOR_suspend xencomm_hypercall_suspend
213#define HYPERVISOR_vcpu_op xencomm_hypercall_vcpu_op
214#define HYPERVISOR_opt_feature xencomm_hypercall_opt_feature
215
216/* to compile gnttab_copy_grant_page() in drivers/xen/core/gnttab.c */
217#define HYPERVISOR_mmu_update(req, count, success_count, domid) ({ BUG(); 0; })
218
219static inline int
220HYPERVISOR_shutdown(
221 unsigned int reason)
222{
223 struct sched_shutdown sched_shutdown = {
224 .reason = reason
225 };
226
227 int rc = HYPERVISOR_sched_op(SCHEDOP_shutdown, &sched_shutdown);
228
229 return rc;
230}
231
232/* for netfront.c, netback.c */
233#define MULTI_UVMFLAGS_INDEX 0 /* XXX any value */
234
235static inline void
236MULTI_update_va_mapping(
237 struct multicall_entry *mcl, unsigned long va,
238 pte_t new_val, unsigned long flags)
239{
240 mcl->op = __HYPERVISOR_update_va_mapping;
241 mcl->result = 0;
242}
243
244static inline void
245MULTI_grant_table_op(struct multicall_entry *mcl, unsigned int cmd,
246 void *uop, unsigned int count)
247{
248 mcl->op = __HYPERVISOR_grant_table_op;
249 mcl->args[0] = cmd;
250 mcl->args[1] = (unsigned long)uop;
251 mcl->args[2] = count;
252}
253
254static inline void
255MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req,
256 int count, int *success_count, domid_t domid)
257{
258 mcl->op = __HYPERVISOR_mmu_update;
259 mcl->args[0] = (unsigned long)req;
260 mcl->args[1] = count;
261 mcl->args[2] = (unsigned long)success_count;
262 mcl->args[3] = domid;
263}
264
265#endif /* _ASM_IA64_XEN_HYPERCALL_H */
diff --git a/arch/ia64/include/asm/xen/hypervisor.h b/arch/ia64/include/asm/xen/hypervisor.h
new file mode 100644
index 000000000000..7a804e80fc67
--- /dev/null
+++ b/arch/ia64/include/asm/xen/hypervisor.h
@@ -0,0 +1,89 @@
1/******************************************************************************
2 * hypervisor.h
3 *
4 * Linux-specific hypervisor handling.
5 *
6 * Copyright (c) 2002-2004, K A Fraser
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version 2
10 * as published by the Free Software Foundation; or, when distributed
11 * separately from the Linux kernel or incorporated into other
12 * software packages, subject to the following license:
13 *
14 * Permission is hereby granted, free of charge, to any person obtaining a copy
15 * of this source file (the "Software"), to deal in the Software without
16 * restriction, including without limitation the rights to use, copy, modify,
17 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
18 * and to permit persons to whom the Software is furnished to do so, subject to
19 * the following conditions:
20 *
21 * The above copyright notice and this permission notice shall be included in
22 * all copies or substantial portions of the Software.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
25 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
26 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
27 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
28 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
29 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
30 * IN THE SOFTWARE.
31 */
32
33#ifndef _ASM_IA64_XEN_HYPERVISOR_H
34#define _ASM_IA64_XEN_HYPERVISOR_H
35
36#ifdef CONFIG_XEN
37
38#include <linux/init.h>
39#include <xen/interface/xen.h>
40#include <xen/interface/version.h> /* to compile feature.c */
41#include <xen/features.h> /* to comiple xen-netfront.c */
42#include <asm/xen/hypercall.h>
43
44/* xen_domain_type is set before executing any C code by early_xen_setup */
45enum xen_domain_type {
46 XEN_NATIVE,
47 XEN_PV_DOMAIN,
48 XEN_HVM_DOMAIN,
49};
50
51extern enum xen_domain_type xen_domain_type;
52
53#define xen_domain() (xen_domain_type != XEN_NATIVE)
54#define xen_pv_domain() (xen_domain_type == XEN_PV_DOMAIN)
55#define xen_initial_domain() (xen_pv_domain() && \
56 (xen_start_info->flags & SIF_INITDOMAIN))
57#define xen_hvm_domain() (xen_domain_type == XEN_HVM_DOMAIN)
58
59/* deprecated. remove this */
60#define is_running_on_xen() (xen_domain_type == XEN_PV_DOMAIN)
61
62extern struct shared_info *HYPERVISOR_shared_info;
63extern struct start_info *xen_start_info;
64
65void __init xen_setup_vcpu_info_placement(void);
66void force_evtchn_callback(void);
67
68/* for drivers/xen/balloon/balloon.c */
69#ifdef CONFIG_XEN_SCRUB_PAGES
70#define scrub_pages(_p, _n) memset((void *)(_p), 0, (_n) << PAGE_SHIFT)
71#else
72#define scrub_pages(_p, _n) ((void)0)
73#endif
74
75/* For setup_arch() in arch/ia64/kernel/setup.c */
76void xen_ia64_enable_opt_feature(void);
77
78#else /* CONFIG_XEN */
79
80#define xen_domain() (0)
81#define xen_pv_domain() (0)
82#define xen_initial_domain() (0)
83#define xen_hvm_domain() (0)
84#define is_running_on_xen() (0) /* deprecated. remove this */
85#endif
86
87#define is_initial_xendomain() (0) /* deprecated. remove this */
88
89#endif /* _ASM_IA64_XEN_HYPERVISOR_H */
diff --git a/arch/ia64/include/asm/xen/inst.h b/arch/ia64/include/asm/xen/inst.h
new file mode 100644
index 000000000000..19c2ae1d878a
--- /dev/null
+++ b/arch/ia64/include/asm/xen/inst.h
@@ -0,0 +1,458 @@
1/******************************************************************************
2 * arch/ia64/include/asm/xen/inst.h
3 *
4 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#include <asm/xen/privop.h>
24
25#define ia64_ivt xen_ivt
26#define DO_SAVE_MIN XEN_DO_SAVE_MIN
27
28#define __paravirt_switch_to xen_switch_to
29#define __paravirt_leave_syscall xen_leave_syscall
30#define __paravirt_work_processed_syscall xen_work_processed_syscall
31#define __paravirt_leave_kernel xen_leave_kernel
32#define __paravirt_pending_syscall_end xen_work_pending_syscall_end
33#define __paravirt_work_processed_syscall_target \
34 xen_work_processed_syscall
35
36#define MOV_FROM_IFA(reg) \
37 movl reg = XSI_IFA; \
38 ;; \
39 ld8 reg = [reg]
40
41#define MOV_FROM_ITIR(reg) \
42 movl reg = XSI_ITIR; \
43 ;; \
44 ld8 reg = [reg]
45
46#define MOV_FROM_ISR(reg) \
47 movl reg = XSI_ISR; \
48 ;; \
49 ld8 reg = [reg]
50
51#define MOV_FROM_IHA(reg) \
52 movl reg = XSI_IHA; \
53 ;; \
54 ld8 reg = [reg]
55
56#define MOV_FROM_IPSR(pred, reg) \
57(pred) movl reg = XSI_IPSR; \
58 ;; \
59(pred) ld8 reg = [reg]
60
61#define MOV_FROM_IIM(reg) \
62 movl reg = XSI_IIM; \
63 ;; \
64 ld8 reg = [reg]
65
66#define MOV_FROM_IIP(reg) \
67 movl reg = XSI_IIP; \
68 ;; \
69 ld8 reg = [reg]
70
71.macro __MOV_FROM_IVR reg, clob
72 .ifc "\reg", "r8"
73 XEN_HYPER_GET_IVR
74 .exitm
75 .endif
76 .ifc "\clob", "r8"
77 XEN_HYPER_GET_IVR
78 ;;
79 mov \reg = r8
80 .exitm
81 .endif
82
83 mov \clob = r8
84 ;;
85 XEN_HYPER_GET_IVR
86 ;;
87 mov \reg = r8
88 ;;
89 mov r8 = \clob
90.endm
91#define MOV_FROM_IVR(reg, clob) __MOV_FROM_IVR reg, clob
92
93.macro __MOV_FROM_PSR pred, reg, clob
94 .ifc "\reg", "r8"
95 (\pred) XEN_HYPER_GET_PSR;
96 .exitm
97 .endif
98 .ifc "\clob", "r8"
99 (\pred) XEN_HYPER_GET_PSR
100 ;;
101 (\pred) mov \reg = r8
102 .exitm
103 .endif
104
105 (\pred) mov \clob = r8
106 (\pred) XEN_HYPER_GET_PSR
107 ;;
108 (\pred) mov \reg = r8
109 (\pred) mov r8 = \clob
110.endm
111#define MOV_FROM_PSR(pred, reg, clob) __MOV_FROM_PSR pred, reg, clob
112
113
114#define MOV_TO_IFA(reg, clob) \
115 movl clob = XSI_IFA; \
116 ;; \
117 st8 [clob] = reg \
118
119#define MOV_TO_ITIR(pred, reg, clob) \
120(pred) movl clob = XSI_ITIR; \
121 ;; \
122(pred) st8 [clob] = reg
123
124#define MOV_TO_IHA(pred, reg, clob) \
125(pred) movl clob = XSI_IHA; \
126 ;; \
127(pred) st8 [clob] = reg
128
129#define MOV_TO_IPSR(pred, reg, clob) \
130(pred) movl clob = XSI_IPSR; \
131 ;; \
132(pred) st8 [clob] = reg; \
133 ;;
134
135#define MOV_TO_IFS(pred, reg, clob) \
136(pred) movl clob = XSI_IFS; \
137 ;; \
138(pred) st8 [clob] = reg; \
139 ;;
140
141#define MOV_TO_IIP(reg, clob) \
142 movl clob = XSI_IIP; \
143 ;; \
144 st8 [clob] = reg
145
146.macro ____MOV_TO_KR kr, reg, clob0, clob1
147 .ifc "\clob0", "r9"
148 .error "clob0 \clob0 must not be r9"
149 .endif
150 .ifc "\clob1", "r8"
151 .error "clob1 \clob1 must not be r8"
152 .endif
153
154 .ifnc "\reg", "r9"
155 .ifnc "\clob1", "r9"
156 mov \clob1 = r9
157 .endif
158 mov r9 = \reg
159 .endif
160 .ifnc "\clob0", "r8"
161 mov \clob0 = r8
162 .endif
163 mov r8 = \kr
164 ;;
165 XEN_HYPER_SET_KR
166
167 .ifnc "\reg", "r9"
168 .ifnc "\clob1", "r9"
169 mov r9 = \clob1
170 .endif
171 .endif
172 .ifnc "\clob0", "r8"
173 mov r8 = \clob0
174 .endif
175.endm
176
177.macro __MOV_TO_KR kr, reg, clob0, clob1
178 .ifc "\clob0", "r9"
179 ____MOV_TO_KR \kr, \reg, \clob1, \clob0
180 .exitm
181 .endif
182 .ifc "\clob1", "r8"
183 ____MOV_TO_KR \kr, \reg, \clob1, \clob0
184 .exitm
185 .endif
186
187 ____MOV_TO_KR \kr, \reg, \clob0, \clob1
188.endm
189
190#define MOV_TO_KR(kr, reg, clob0, clob1) \
191 __MOV_TO_KR IA64_KR_ ## kr, reg, clob0, clob1
192
193
194.macro __ITC_I pred, reg, clob
195 .ifc "\reg", "r8"
196 (\pred) XEN_HYPER_ITC_I
197 .exitm
198 .endif
199 .ifc "\clob", "r8"
200 (\pred) mov r8 = \reg
201 ;;
202 (\pred) XEN_HYPER_ITC_I
203 .exitm
204 .endif
205
206 (\pred) mov \clob = r8
207 (\pred) mov r8 = \reg
208 ;;
209 (\pred) XEN_HYPER_ITC_I
210 ;;
211 (\pred) mov r8 = \clob
212 ;;
213.endm
214#define ITC_I(pred, reg, clob) __ITC_I pred, reg, clob
215
216.macro __ITC_D pred, reg, clob
217 .ifc "\reg", "r8"
218 (\pred) XEN_HYPER_ITC_D
219 ;;
220 .exitm
221 .endif
222 .ifc "\clob", "r8"
223 (\pred) mov r8 = \reg
224 ;;
225 (\pred) XEN_HYPER_ITC_D
226 ;;
227 .exitm
228 .endif
229
230 (\pred) mov \clob = r8
231 (\pred) mov r8 = \reg
232 ;;
233 (\pred) XEN_HYPER_ITC_D
234 ;;
235 (\pred) mov r8 = \clob
236 ;;
237.endm
238#define ITC_D(pred, reg, clob) __ITC_D pred, reg, clob
239
240.macro __ITC_I_AND_D pred_i, pred_d, reg, clob
241 .ifc "\reg", "r8"
242 (\pred_i)XEN_HYPER_ITC_I
243 ;;
244 (\pred_d)XEN_HYPER_ITC_D
245 ;;
246 .exitm
247 .endif
248 .ifc "\clob", "r8"
249 mov r8 = \reg
250 ;;
251 (\pred_i)XEN_HYPER_ITC_I
252 ;;
253 (\pred_d)XEN_HYPER_ITC_D
254 ;;
255 .exitm
256 .endif
257
258 mov \clob = r8
259 mov r8 = \reg
260 ;;
261 (\pred_i)XEN_HYPER_ITC_I
262 ;;
263 (\pred_d)XEN_HYPER_ITC_D
264 ;;
265 mov r8 = \clob
266 ;;
267.endm
268#define ITC_I_AND_D(pred_i, pred_d, reg, clob) \
269 __ITC_I_AND_D pred_i, pred_d, reg, clob
270
271.macro __THASH pred, reg0, reg1, clob
272 .ifc "\reg0", "r8"
273 (\pred) mov r8 = \reg1
274 (\pred) XEN_HYPER_THASH
275 .exitm
276 .endc
277 .ifc "\reg1", "r8"
278 (\pred) XEN_HYPER_THASH
279 ;;
280 (\pred) mov \reg0 = r8
281 ;;
282 .exitm
283 .endif
284 .ifc "\clob", "r8"
285 (\pred) mov r8 = \reg1
286 (\pred) XEN_HYPER_THASH
287 ;;
288 (\pred) mov \reg0 = r8
289 ;;
290 .exitm
291 .endif
292
293 (\pred) mov \clob = r8
294 (\pred) mov r8 = \reg1
295 (\pred) XEN_HYPER_THASH
296 ;;
297 (\pred) mov \reg0 = r8
298 (\pred) mov r8 = \clob
299 ;;
300.endm
301#define THASH(pred, reg0, reg1, clob) __THASH pred, reg0, reg1, clob
302
303#define SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(clob0, clob1) \
304 mov clob0 = 1; \
305 movl clob1 = XSI_PSR_IC; \
306 ;; \
307 st4 [clob1] = clob0 \
308 ;;
309
310#define SSM_PSR_IC_AND_SRLZ_D(clob0, clob1) \
311 ;; \
312 srlz.d; \
313 mov clob1 = 1; \
314 movl clob0 = XSI_PSR_IC; \
315 ;; \
316 st4 [clob0] = clob1
317
318#define RSM_PSR_IC(clob) \
319 movl clob = XSI_PSR_IC; \
320 ;; \
321 st4 [clob] = r0; \
322 ;;
323
324/* pred will be clobbered */
325#define MASK_TO_PEND_OFS (-1)
326#define SSM_PSR_I(pred, pred_clob, clob) \
327(pred) movl clob = XSI_PSR_I_ADDR \
328 ;; \
329(pred) ld8 clob = [clob] \
330 ;; \
331 /* if (pred) vpsr.i = 1 */ \
332 /* if (pred) (vcpu->vcpu_info->evtchn_upcall_mask)=0 */ \
333(pred) st1 [clob] = r0, MASK_TO_PEND_OFS \
334 ;; \
335 /* if (vcpu->vcpu_info->evtchn_upcall_pending) */ \
336(pred) ld1 clob = [clob] \
337 ;; \
338(pred) cmp.ne.unc pred_clob, p0 = clob, r0 \
339 ;; \
340(pred_clob)XEN_HYPER_SSM_I /* do areal ssm psr.i */
341
342#define RSM_PSR_I(pred, clob0, clob1) \
343 movl clob0 = XSI_PSR_I_ADDR; \
344 mov clob1 = 1; \
345 ;; \
346 ld8 clob0 = [clob0]; \
347 ;; \
348(pred) st1 [clob0] = clob1
349
350#define RSM_PSR_I_IC(clob0, clob1, clob2) \
351 movl clob0 = XSI_PSR_I_ADDR; \
352 movl clob1 = XSI_PSR_IC; \
353 ;; \
354 ld8 clob0 = [clob0]; \
355 mov clob2 = 1; \
356 ;; \
357 /* note: clears both vpsr.i and vpsr.ic! */ \
358 st1 [clob0] = clob2; \
359 st4 [clob1] = r0; \
360 ;;
361
362#define RSM_PSR_DT \
363 XEN_HYPER_RSM_PSR_DT
364
365#define SSM_PSR_DT_AND_SRLZ_I \
366 XEN_HYPER_SSM_PSR_DT
367
368#define BSW_0(clob0, clob1, clob2) \
369 ;; \
370 /* r16-r31 all now hold bank1 values */ \
371 mov clob2 = ar.unat; \
372 movl clob0 = XSI_BANK1_R16; \
373 movl clob1 = XSI_BANK1_R16 + 8; \
374 ;; \
375.mem.offset 0, 0; st8.spill [clob0] = r16, 16; \
376.mem.offset 8, 0; st8.spill [clob1] = r17, 16; \
377 ;; \
378.mem.offset 0, 0; st8.spill [clob0] = r18, 16; \
379.mem.offset 8, 0; st8.spill [clob1] = r19, 16; \
380 ;; \
381.mem.offset 0, 0; st8.spill [clob0] = r20, 16; \
382.mem.offset 8, 0; st8.spill [clob1] = r21, 16; \
383 ;; \
384.mem.offset 0, 0; st8.spill [clob0] = r22, 16; \
385.mem.offset 8, 0; st8.spill [clob1] = r23, 16; \
386 ;; \
387.mem.offset 0, 0; st8.spill [clob0] = r24, 16; \
388.mem.offset 8, 0; st8.spill [clob1] = r25, 16; \
389 ;; \
390.mem.offset 0, 0; st8.spill [clob0] = r26, 16; \
391.mem.offset 8, 0; st8.spill [clob1] = r27, 16; \
392 ;; \
393.mem.offset 0, 0; st8.spill [clob0] = r28, 16; \
394.mem.offset 8, 0; st8.spill [clob1] = r29, 16; \
395 ;; \
396.mem.offset 0, 0; st8.spill [clob0] = r30, 16; \
397.mem.offset 8, 0; st8.spill [clob1] = r31, 16; \
398 ;; \
399 mov clob1 = ar.unat; \
400 movl clob0 = XSI_B1NAT; \
401 ;; \
402 st8 [clob0] = clob1; \
403 mov ar.unat = clob2; \
404 movl clob0 = XSI_BANKNUM; \
405 ;; \
406 st4 [clob0] = r0
407
408
409 /* FIXME: THIS CODE IS NOT NaT SAFE! */
410#define XEN_BSW_1(clob) \
411 mov clob = ar.unat; \
412 movl r30 = XSI_B1NAT; \
413 ;; \
414 ld8 r30 = [r30]; \
415 mov r31 = 1; \
416 ;; \
417 mov ar.unat = r30; \
418 movl r30 = XSI_BANKNUM; \
419 ;; \
420 st4 [r30] = r31; \
421 movl r30 = XSI_BANK1_R16; \
422 movl r31 = XSI_BANK1_R16+8; \
423 ;; \
424 ld8.fill r16 = [r30], 16; \
425 ld8.fill r17 = [r31], 16; \
426 ;; \
427 ld8.fill r18 = [r30], 16; \
428 ld8.fill r19 = [r31], 16; \
429 ;; \
430 ld8.fill r20 = [r30], 16; \
431 ld8.fill r21 = [r31], 16; \
432 ;; \
433 ld8.fill r22 = [r30], 16; \
434 ld8.fill r23 = [r31], 16; \
435 ;; \
436 ld8.fill r24 = [r30], 16; \
437 ld8.fill r25 = [r31], 16; \
438 ;; \
439 ld8.fill r26 = [r30], 16; \
440 ld8.fill r27 = [r31], 16; \
441 ;; \
442 ld8.fill r28 = [r30], 16; \
443 ld8.fill r29 = [r31], 16; \
444 ;; \
445 ld8.fill r30 = [r30]; \
446 ld8.fill r31 = [r31]; \
447 ;; \
448 mov ar.unat = clob
449
450#define BSW_1(clob0, clob1) XEN_BSW_1(clob1)
451
452
453#define COVER \
454 XEN_HYPER_COVER
455
456#define RFI \
457 XEN_HYPER_RFI; \
458 dv_serialize_data
diff --git a/arch/ia64/include/asm/xen/interface.h b/arch/ia64/include/asm/xen/interface.h
new file mode 100644
index 000000000000..f00fab40854d
--- /dev/null
+++ b/arch/ia64/include/asm/xen/interface.h
@@ -0,0 +1,346 @@
1/******************************************************************************
2 * arch-ia64/hypervisor-if.h
3 *
4 * Guest OS interface to IA64 Xen.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 *
24 * Copyright by those who contributed. (in alphabetical order)
25 *
26 * Anthony Xu <anthony.xu@intel.com>
27 * Eddie Dong <eddie.dong@intel.com>
28 * Fred Yang <fred.yang@intel.com>
29 * Kevin Tian <kevin.tian@intel.com>
30 * Alex Williamson <alex.williamson@hp.com>
31 * Chris Wright <chrisw@sous-sol.org>
32 * Christian Limpach <Christian.Limpach@cl.cam.ac.uk>
33 * Dietmar Hahn <dietmar.hahn@fujitsu-siemens.com>
34 * Hollis Blanchard <hollisb@us.ibm.com>
35 * Isaku Yamahata <yamahata@valinux.co.jp>
36 * Jan Beulich <jbeulich@novell.com>
37 * John Levon <john.levon@sun.com>
38 * Kazuhiro Suzuki <kaz@jp.fujitsu.com>
39 * Keir Fraser <keir.fraser@citrix.com>
40 * Kouya Shimura <kouya@jp.fujitsu.com>
41 * Masaki Kanno <kanno.masaki@jp.fujitsu.com>
42 * Matt Chapman <matthewc@hp.com>
43 * Matthew Chapman <matthewc@hp.com>
44 * Samuel Thibault <samuel.thibault@eu.citrix.com>
45 * Tomonari Horikoshi <t.horikoshi@jp.fujitsu.com>
46 * Tristan Gingold <tgingold@free.fr>
47 * Tsunehisa Doi <Doi.Tsunehisa@jp.fujitsu.com>
48 * Yutaka Ezaki <yutaka.ezaki@jp.fujitsu.com>
49 * Zhang Xin <xing.z.zhang@intel.com>
50 * Zhang xiantao <xiantao.zhang@intel.com>
51 * dan.magenheimer@hp.com
52 * ian.pratt@cl.cam.ac.uk
53 * michael.fetterman@cl.cam.ac.uk
54 */
55
56#ifndef _ASM_IA64_XEN_INTERFACE_H
57#define _ASM_IA64_XEN_INTERFACE_H
58
59#define __DEFINE_GUEST_HANDLE(name, type) \
60 typedef struct { type *p; } __guest_handle_ ## name
61
62#define DEFINE_GUEST_HANDLE_STRUCT(name) \
63 __DEFINE_GUEST_HANDLE(name, struct name)
64#define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name)
65#define GUEST_HANDLE(name) __guest_handle_ ## name
66#define GUEST_HANDLE_64(name) GUEST_HANDLE(name)
67#define set_xen_guest_handle(hnd, val) do { (hnd).p = val; } while (0)
68
69#ifndef __ASSEMBLY__
70/* Guest handles for primitive C types. */
71__DEFINE_GUEST_HANDLE(uchar, unsigned char);
72__DEFINE_GUEST_HANDLE(uint, unsigned int);
73__DEFINE_GUEST_HANDLE(ulong, unsigned long);
74__DEFINE_GUEST_HANDLE(u64, unsigned long);
75DEFINE_GUEST_HANDLE(char);
76DEFINE_GUEST_HANDLE(int);
77DEFINE_GUEST_HANDLE(long);
78DEFINE_GUEST_HANDLE(void);
79
80typedef unsigned long xen_pfn_t;
81DEFINE_GUEST_HANDLE(xen_pfn_t);
82#define PRI_xen_pfn "lx"
83#endif
84
85/* Arch specific VIRQs definition */
86#define VIRQ_ITC VIRQ_ARCH_0 /* V. Virtual itc timer */
87#define VIRQ_MCA_CMC VIRQ_ARCH_1 /* MCA cmc interrupt */
88#define VIRQ_MCA_CPE VIRQ_ARCH_2 /* MCA cpe interrupt */
89
90/* Maximum number of virtual CPUs in multi-processor guests. */
91/* keep sizeof(struct shared_page) <= PAGE_SIZE.
92 * this is checked in arch/ia64/xen/hypervisor.c. */
93#define MAX_VIRT_CPUS 64
94
95#ifndef __ASSEMBLY__
96
97#define INVALID_MFN (~0UL)
98
99union vac {
100 unsigned long value;
101 struct {
102 int a_int:1;
103 int a_from_int_cr:1;
104 int a_to_int_cr:1;
105 int a_from_psr:1;
106 int a_from_cpuid:1;
107 int a_cover:1;
108 int a_bsw:1;
109 long reserved:57;
110 };
111};
112
113union vdc {
114 unsigned long value;
115 struct {
116 int d_vmsw:1;
117 int d_extint:1;
118 int d_ibr_dbr:1;
119 int d_pmc:1;
120 int d_to_pmd:1;
121 int d_itm:1;
122 long reserved:58;
123 };
124};
125
126struct mapped_regs {
127 union vac vac;
128 union vdc vdc;
129 unsigned long virt_env_vaddr;
130 unsigned long reserved1[29];
131 unsigned long vhpi;
132 unsigned long reserved2[95];
133 union {
134 unsigned long vgr[16];
135 unsigned long bank1_regs[16]; /* bank1 regs (r16-r31)
136 when bank0 active */
137 };
138 union {
139 unsigned long vbgr[16];
140 unsigned long bank0_regs[16]; /* bank0 regs (r16-r31)
141 when bank1 active */
142 };
143 unsigned long vnat;
144 unsigned long vbnat;
145 unsigned long vcpuid[5];
146 unsigned long reserved3[11];
147 unsigned long vpsr;
148 unsigned long vpr;
149 unsigned long reserved4[76];
150 union {
151 unsigned long vcr[128];
152 struct {
153 unsigned long dcr; /* CR0 */
154 unsigned long itm;
155 unsigned long iva;
156 unsigned long rsv1[5];
157 unsigned long pta; /* CR8 */
158 unsigned long rsv2[7];
159 unsigned long ipsr; /* CR16 */
160 unsigned long isr;
161 unsigned long rsv3;
162 unsigned long iip;
163 unsigned long ifa;
164 unsigned long itir;
165 unsigned long iipa;
166 unsigned long ifs;
167 unsigned long iim; /* CR24 */
168 unsigned long iha;
169 unsigned long rsv4[38];
170 unsigned long lid; /* CR64 */
171 unsigned long ivr;
172 unsigned long tpr;
173 unsigned long eoi;
174 unsigned long irr[4];
175 unsigned long itv; /* CR72 */
176 unsigned long pmv;
177 unsigned long cmcv;
178 unsigned long rsv5[5];
179 unsigned long lrr0; /* CR80 */
180 unsigned long lrr1;
181 unsigned long rsv6[46];
182 };
183 };
184 union {
185 unsigned long reserved5[128];
186 struct {
187 unsigned long precover_ifs;
188 unsigned long unat; /* not sure if this is needed
189 until NaT arch is done */
190 int interrupt_collection_enabled; /* virtual psr.ic */
191
192 /* virtual interrupt deliverable flag is
193 * evtchn_upcall_mask in shared info area now.
194 * interrupt_mask_addr is the address
195 * of evtchn_upcall_mask for current vcpu
196 */
197 unsigned char *interrupt_mask_addr;
198 int pending_interruption;
199 unsigned char vpsr_pp;
200 unsigned char vpsr_dfh;
201 unsigned char hpsr_dfh;
202 unsigned char hpsr_mfh;
203 unsigned long reserved5_1[4];
204 int metaphysical_mode; /* 1 = use metaphys mapping
205 0 = use virtual */
206 int banknum; /* 0 or 1, which virtual
207 register bank is active */
208 unsigned long rrs[8]; /* region registers */
209 unsigned long krs[8]; /* kernel registers */
210 unsigned long tmp[16]; /* temp registers
211 (e.g. for hyperprivops) */
212 };
213 };
214};
215
216struct arch_vcpu_info {
217 /* nothing */
218};
219
220/*
221 * This structure is used for magic page in domain pseudo physical address
222 * space and the result of XENMEM_machine_memory_map.
223 * As the XENMEM_machine_memory_map result,
224 * xen_memory_map::nr_entries indicates the size in bytes
225 * including struct xen_ia64_memmap_info. Not the number of entries.
226 */
227struct xen_ia64_memmap_info {
228 uint64_t efi_memmap_size; /* size of EFI memory map */
229 uint64_t efi_memdesc_size; /* size of an EFI memory map
230 * descriptor */
231 uint32_t efi_memdesc_version; /* memory descriptor version */
232 void *memdesc[0]; /* array of efi_memory_desc_t */
233};
234
235struct arch_shared_info {
236 /* PFN of the start_info page. */
237 unsigned long start_info_pfn;
238
239 /* Interrupt vector for event channel. */
240 int evtchn_vector;
241
242 /* PFN of memmap_info page */
243 unsigned int memmap_info_num_pages; /* currently only = 1 case is
244 supported. */
245 unsigned long memmap_info_pfn;
246
247 uint64_t pad[31];
248};
249
250struct xen_callback {
251 unsigned long ip;
252};
253typedef struct xen_callback xen_callback_t;
254
255#endif /* !__ASSEMBLY__ */
256
257/* Size of the shared_info area (this is not related to page size). */
258#define XSI_SHIFT 14
259#define XSI_SIZE (1 << XSI_SHIFT)
260/* Log size of mapped_regs area (64 KB - only 4KB is used). */
261#define XMAPPEDREGS_SHIFT 12
262#define XMAPPEDREGS_SIZE (1 << XMAPPEDREGS_SHIFT)
263/* Offset of XASI (Xen arch shared info) wrt XSI_BASE. */
264#define XMAPPEDREGS_OFS XSI_SIZE
265
266/* Hyperprivops. */
267#define HYPERPRIVOP_START 0x1
268#define HYPERPRIVOP_RFI (HYPERPRIVOP_START + 0x0)
269#define HYPERPRIVOP_RSM_DT (HYPERPRIVOP_START + 0x1)
270#define HYPERPRIVOP_SSM_DT (HYPERPRIVOP_START + 0x2)
271#define HYPERPRIVOP_COVER (HYPERPRIVOP_START + 0x3)
272#define HYPERPRIVOP_ITC_D (HYPERPRIVOP_START + 0x4)
273#define HYPERPRIVOP_ITC_I (HYPERPRIVOP_START + 0x5)
274#define HYPERPRIVOP_SSM_I (HYPERPRIVOP_START + 0x6)
275#define HYPERPRIVOP_GET_IVR (HYPERPRIVOP_START + 0x7)
276#define HYPERPRIVOP_GET_TPR (HYPERPRIVOP_START + 0x8)
277#define HYPERPRIVOP_SET_TPR (HYPERPRIVOP_START + 0x9)
278#define HYPERPRIVOP_EOI (HYPERPRIVOP_START + 0xa)
279#define HYPERPRIVOP_SET_ITM (HYPERPRIVOP_START + 0xb)
280#define HYPERPRIVOP_THASH (HYPERPRIVOP_START + 0xc)
281#define HYPERPRIVOP_PTC_GA (HYPERPRIVOP_START + 0xd)
282#define HYPERPRIVOP_ITR_D (HYPERPRIVOP_START + 0xe)
283#define HYPERPRIVOP_GET_RR (HYPERPRIVOP_START + 0xf)
284#define HYPERPRIVOP_SET_RR (HYPERPRIVOP_START + 0x10)
285#define HYPERPRIVOP_SET_KR (HYPERPRIVOP_START + 0x11)
286#define HYPERPRIVOP_FC (HYPERPRIVOP_START + 0x12)
287#define HYPERPRIVOP_GET_CPUID (HYPERPRIVOP_START + 0x13)
288#define HYPERPRIVOP_GET_PMD (HYPERPRIVOP_START + 0x14)
289#define HYPERPRIVOP_GET_EFLAG (HYPERPRIVOP_START + 0x15)
290#define HYPERPRIVOP_SET_EFLAG (HYPERPRIVOP_START + 0x16)
291#define HYPERPRIVOP_RSM_BE (HYPERPRIVOP_START + 0x17)
292#define HYPERPRIVOP_GET_PSR (HYPERPRIVOP_START + 0x18)
293#define HYPERPRIVOP_SET_RR0_TO_RR4 (HYPERPRIVOP_START + 0x19)
294#define HYPERPRIVOP_MAX (0x1a)
295
296/* Fast and light hypercalls. */
297#define __HYPERVISOR_ia64_fast_eoi __HYPERVISOR_arch_1
298
299/* Xencomm macros. */
300#define XENCOMM_INLINE_MASK 0xf800000000000000UL
301#define XENCOMM_INLINE_FLAG 0x8000000000000000UL
302
303#ifndef __ASSEMBLY__
304
305/*
306 * Optimization features.
307 * The hypervisor may do some special optimizations for guests. This hypercall
308 * can be used to switch on/of these special optimizations.
309 */
310#define __HYPERVISOR_opt_feature 0x700UL
311
312#define XEN_IA64_OPTF_OFF 0x0
313#define XEN_IA64_OPTF_ON 0x1
314
315/*
316 * If this feature is switched on, the hypervisor inserts the
317 * tlb entries without calling the guests traphandler.
318 * This is useful in guests using region 7 for identity mapping
319 * like the linux kernel does.
320 */
321#define XEN_IA64_OPTF_IDENT_MAP_REG7 1
322
323/* Identity mapping of region 4 addresses in HVM. */
324#define XEN_IA64_OPTF_IDENT_MAP_REG4 2
325
326/* Identity mapping of region 5 addresses in HVM. */
327#define XEN_IA64_OPTF_IDENT_MAP_REG5 3
328
329#define XEN_IA64_OPTF_IDENT_MAP_NOT_SET (0)
330
331struct xen_ia64_opt_feature {
332 unsigned long cmd; /* Which feature */
333 unsigned char on; /* Switch feature on/off */
334 union {
335 struct {
336 /* The page protection bit mask of the pte.
337 * This will be or'ed with the pte. */
338 unsigned long pgprot;
339 unsigned long key; /* A protection key for itir.*/
340 };
341 };
342};
343
344#endif /* __ASSEMBLY__ */
345
346#endif /* _ASM_IA64_XEN_INTERFACE_H */
diff --git a/arch/ia64/include/asm/xen/irq.h b/arch/ia64/include/asm/xen/irq.h
new file mode 100644
index 000000000000..a90450983003
--- /dev/null
+++ b/arch/ia64/include/asm/xen/irq.h
@@ -0,0 +1,44 @@
1/******************************************************************************
2 * arch/ia64/include/asm/xen/irq.h
3 *
4 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#ifndef _ASM_IA64_XEN_IRQ_H
24#define _ASM_IA64_XEN_IRQ_H
25
26/*
27 * The flat IRQ space is divided into two regions:
28 * 1. A one-to-one mapping of real physical IRQs. This space is only used
29 * if we have physical device-access privilege. This region is at the
30 * start of the IRQ space so that existing device drivers do not need
31 * to be modified to translate physical IRQ numbers into our IRQ space.
32 * 3. A dynamic mapping of inter-domain and Xen-sourced virtual IRQs. These
33 * are bound using the provided bind/unbind functions.
34 */
35
36#define XEN_PIRQ_BASE 0
37#define XEN_NR_PIRQS 256
38
39#define XEN_DYNIRQ_BASE (XEN_PIRQ_BASE + XEN_NR_PIRQS)
40#define XEN_NR_DYNIRQS (NR_CPUS * 8)
41
42#define XEN_NR_IRQS (XEN_NR_PIRQS + XEN_NR_DYNIRQS)
43
44#endif /* _ASM_IA64_XEN_IRQ_H */
diff --git a/arch/ia64/include/asm/xen/minstate.h b/arch/ia64/include/asm/xen/minstate.h
new file mode 100644
index 000000000000..4d92d9bbda7b
--- /dev/null
+++ b/arch/ia64/include/asm/xen/minstate.h
@@ -0,0 +1,134 @@
1/*
2 * DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves
3 * the minimum state necessary that allows us to turn psr.ic back
4 * on.
5 *
6 * Assumed state upon entry:
7 * psr.ic: off
8 * r31: contains saved predicates (pr)
9 *
10 * Upon exit, the state is as follows:
11 * psr.ic: off
12 * r2 = points to &pt_regs.r16
13 * r8 = contents of ar.ccv
14 * r9 = contents of ar.csd
15 * r10 = contents of ar.ssd
16 * r11 = FPSR_DEFAULT
17 * r12 = kernel sp (kernel virtual address)
18 * r13 = points to current task_struct (kernel virtual address)
19 * p15 = TRUE if psr.i is set in cr.ipsr
20 * predicate registers (other than p2, p3, and p15), b6, r3, r14, r15:
21 * preserved
22 * CONFIG_XEN note: p6/p7 are not preserved
23 *
24 * Note that psr.ic is NOT turned on by this macro. This is so that
25 * we can pass interruption state as arguments to a handler.
26 */
27#define XEN_DO_SAVE_MIN(__COVER,SAVE_IFS,EXTRA,WORKAROUND) \
28 mov r16=IA64_KR(CURRENT); /* M */ \
29 mov r27=ar.rsc; /* M */ \
30 mov r20=r1; /* A */ \
31 mov r25=ar.unat; /* M */ \
32 MOV_FROM_IPSR(p0,r29); /* M */ \
33 MOV_FROM_IIP(r28); /* M */ \
34 mov r21=ar.fpsr; /* M */ \
35 mov r26=ar.pfs; /* I */ \
36 __COVER; /* B;; (or nothing) */ \
37 adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16; \
38 ;; \
39 ld1 r17=[r16]; /* load current->thread.on_ustack flag */ \
40 st1 [r16]=r0; /* clear current->thread.on_ustack flag */ \
41 adds r1=-IA64_TASK_THREAD_ON_USTACK_OFFSET,r16 \
42 /* switch from user to kernel RBS: */ \
43 ;; \
44 invala; /* M */ \
45 /* SAVE_IFS;*/ /* see xen special handling below */ \
46 cmp.eq pKStk,pUStk=r0,r17; /* are we in kernel mode already? */ \
47 ;; \
48(pUStk) mov ar.rsc=0; /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \
49 ;; \
50(pUStk) mov.m r24=ar.rnat; \
51(pUStk) addl r22=IA64_RBS_OFFSET,r1; /* compute base of RBS */ \
52(pKStk) mov r1=sp; /* get sp */ \
53 ;; \
54(pUStk) lfetch.fault.excl.nt1 [r22]; \
55(pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1; /* compute base of memory stack */ \
56(pUStk) mov r23=ar.bspstore; /* save ar.bspstore */ \
57 ;; \
58(pUStk) mov ar.bspstore=r22; /* switch to kernel RBS */ \
59(pKStk) addl r1=-IA64_PT_REGS_SIZE,r1; /* if in kernel mode, use sp (r12) */ \
60 ;; \
61(pUStk) mov r18=ar.bsp; \
62(pUStk) mov ar.rsc=0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */ \
63 adds r17=2*L1_CACHE_BYTES,r1; /* really: biggest cache-line size */ \
64 adds r16=PT(CR_IPSR),r1; \
65 ;; \
66 lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES; \
67 st8 [r16]=r29; /* save cr.ipsr */ \
68 ;; \
69 lfetch.fault.excl.nt1 [r17]; \
70 tbit.nz p15,p0=r29,IA64_PSR_I_BIT; \
71 mov r29=b0 \
72 ;; \
73 WORKAROUND; \
74 adds r16=PT(R8),r1; /* initialize first base pointer */ \
75 adds r17=PT(R9),r1; /* initialize second base pointer */ \
76(pKStk) mov r18=r0; /* make sure r18 isn't NaT */ \
77 ;; \
78.mem.offset 0,0; st8.spill [r16]=r8,16; \
79.mem.offset 8,0; st8.spill [r17]=r9,16; \
80 ;; \
81.mem.offset 0,0; st8.spill [r16]=r10,24; \
82 movl r8=XSI_PRECOVER_IFS; \
83.mem.offset 8,0; st8.spill [r17]=r11,24; \
84 ;; \
85 /* xen special handling for possibly lazy cover */ \
86 /* SAVE_MIN case in dispatch_ia32_handler: mov r30=r0 */ \
87 ld8 r30=[r8]; \
88(pUStk) sub r18=r18,r22; /* r18=RSE.ndirty*8 */ \
89 st8 [r16]=r28,16; /* save cr.iip */ \
90 ;; \
91 st8 [r17]=r30,16; /* save cr.ifs */ \
92 mov r8=ar.ccv; \
93 mov r9=ar.csd; \
94 mov r10=ar.ssd; \
95 movl r11=FPSR_DEFAULT; /* L-unit */ \
96 ;; \
97 st8 [r16]=r25,16; /* save ar.unat */ \
98 st8 [r17]=r26,16; /* save ar.pfs */ \
99 shl r18=r18,16; /* compute ar.rsc to be used for "loadrs" */ \
100 ;; \
101 st8 [r16]=r27,16; /* save ar.rsc */ \
102(pUStk) st8 [r17]=r24,16; /* save ar.rnat */ \
103(pKStk) adds r17=16,r17; /* skip over ar_rnat field */ \
104 ;; /* avoid RAW on r16 & r17 */ \
105(pUStk) st8 [r16]=r23,16; /* save ar.bspstore */ \
106 st8 [r17]=r31,16; /* save predicates */ \
107(pKStk) adds r16=16,r16; /* skip over ar_bspstore field */ \
108 ;; \
109 st8 [r16]=r29,16; /* save b0 */ \
110 st8 [r17]=r18,16; /* save ar.rsc value for "loadrs" */ \
111 cmp.eq pNonSys,pSys=r0,r0 /* initialize pSys=0, pNonSys=1 */ \
112 ;; \
113.mem.offset 0,0; st8.spill [r16]=r20,16; /* save original r1 */ \
114.mem.offset 8,0; st8.spill [r17]=r12,16; \
115 adds r12=-16,r1; /* switch to kernel memory stack (with 16 bytes of scratch) */ \
116 ;; \
117.mem.offset 0,0; st8.spill [r16]=r13,16; \
118.mem.offset 8,0; st8.spill [r17]=r21,16; /* save ar.fpsr */ \
119 mov r13=IA64_KR(CURRENT); /* establish `current' */ \
120 ;; \
121.mem.offset 0,0; st8.spill [r16]=r15,16; \
122.mem.offset 8,0; st8.spill [r17]=r14,16; \
123 ;; \
124.mem.offset 0,0; st8.spill [r16]=r2,16; \
125.mem.offset 8,0; st8.spill [r17]=r3,16; \
126 ACCOUNT_GET_STAMP \
127 adds r2=IA64_PT_REGS_R16_OFFSET,r1; \
128 ;; \
129 EXTRA; \
130 movl r1=__gp; /* establish kernel global pointer */ \
131 ;; \
132 ACCOUNT_SYS_ENTER \
133 BSW_1(r3,r14); /* switch back to bank 1 (must be last in insn group) */ \
134 ;;
diff --git a/arch/ia64/include/asm/xen/page.h b/arch/ia64/include/asm/xen/page.h
new file mode 100644
index 000000000000..03441a780b5b
--- /dev/null
+++ b/arch/ia64/include/asm/xen/page.h
@@ -0,0 +1,65 @@
1/******************************************************************************
2 * arch/ia64/include/asm/xen/page.h
3 *
4 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#ifndef _ASM_IA64_XEN_PAGE_H
24#define _ASM_IA64_XEN_PAGE_H
25
26#define INVALID_P2M_ENTRY (~0UL)
27
28static inline unsigned long mfn_to_pfn(unsigned long mfn)
29{
30 return mfn;
31}
32
33static inline unsigned long pfn_to_mfn(unsigned long pfn)
34{
35 return pfn;
36}
37
38#define phys_to_machine_mapping_valid(_x) (1)
39
40static inline void *mfn_to_virt(unsigned long mfn)
41{
42 return __va(mfn << PAGE_SHIFT);
43}
44
45static inline unsigned long virt_to_mfn(void *virt)
46{
47 return __pa(virt) >> PAGE_SHIFT;
48}
49
50/* for tpmfront.c */
51static inline unsigned long virt_to_machine(void *virt)
52{
53 return __pa(virt);
54}
55
56static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
57{
58 /* nothing */
59}
60
61#define pte_mfn(_x) pte_pfn(_x)
62#define mfn_pte(_x, _y) __pte_ma(0) /* unmodified use */
63#define __pte_ma(_x) ((pte_t) {(_x)}) /* unmodified use */
64
65#endif /* _ASM_IA64_XEN_PAGE_H */
diff --git a/arch/ia64/include/asm/xen/privop.h b/arch/ia64/include/asm/xen/privop.h
new file mode 100644
index 000000000000..71ec7546e100
--- /dev/null
+++ b/arch/ia64/include/asm/xen/privop.h
@@ -0,0 +1,129 @@
1#ifndef _ASM_IA64_XEN_PRIVOP_H
2#define _ASM_IA64_XEN_PRIVOP_H
3
4/*
5 * Copyright (C) 2005 Hewlett-Packard Co
6 * Dan Magenheimer <dan.magenheimer@hp.com>
7 *
8 * Paravirtualizations of privileged operations for Xen/ia64
9 *
10 *
11 * inline privop and paravirt_alt support
12 * Copyright (c) 2007 Isaku Yamahata <yamahata at valinux co jp>
13 * VA Linux Systems Japan K.K.
14 *
15 */
16
17#ifndef __ASSEMBLY__
18#include <linux/types.h> /* arch-ia64.h requires uint64_t */
19#endif
20#include <asm/xen/interface.h>
21
22/* At 1 MB, before per-cpu space but still addressable using addl instead
23 of movl. */
24#define XSI_BASE 0xfffffffffff00000
25
26/* Address of mapped regs. */
27#define XMAPPEDREGS_BASE (XSI_BASE + XSI_SIZE)
28
29#ifdef __ASSEMBLY__
30#define XEN_HYPER_RFI break HYPERPRIVOP_RFI
31#define XEN_HYPER_RSM_PSR_DT break HYPERPRIVOP_RSM_DT
32#define XEN_HYPER_SSM_PSR_DT break HYPERPRIVOP_SSM_DT
33#define XEN_HYPER_COVER break HYPERPRIVOP_COVER
34#define XEN_HYPER_ITC_D break HYPERPRIVOP_ITC_D
35#define XEN_HYPER_ITC_I break HYPERPRIVOP_ITC_I
36#define XEN_HYPER_SSM_I break HYPERPRIVOP_SSM_I
37#define XEN_HYPER_GET_IVR break HYPERPRIVOP_GET_IVR
38#define XEN_HYPER_THASH break HYPERPRIVOP_THASH
39#define XEN_HYPER_ITR_D break HYPERPRIVOP_ITR_D
40#define XEN_HYPER_SET_KR break HYPERPRIVOP_SET_KR
41#define XEN_HYPER_GET_PSR break HYPERPRIVOP_GET_PSR
42#define XEN_HYPER_SET_RR0_TO_RR4 break HYPERPRIVOP_SET_RR0_TO_RR4
43
44#define XSI_IFS (XSI_BASE + XSI_IFS_OFS)
45#define XSI_PRECOVER_IFS (XSI_BASE + XSI_PRECOVER_IFS_OFS)
46#define XSI_IFA (XSI_BASE + XSI_IFA_OFS)
47#define XSI_ISR (XSI_BASE + XSI_ISR_OFS)
48#define XSI_IIM (XSI_BASE + XSI_IIM_OFS)
49#define XSI_ITIR (XSI_BASE + XSI_ITIR_OFS)
50#define XSI_PSR_I_ADDR (XSI_BASE + XSI_PSR_I_ADDR_OFS)
51#define XSI_PSR_IC (XSI_BASE + XSI_PSR_IC_OFS)
52#define XSI_IPSR (XSI_BASE + XSI_IPSR_OFS)
53#define XSI_IIP (XSI_BASE + XSI_IIP_OFS)
54#define XSI_B1NAT (XSI_BASE + XSI_B1NATS_OFS)
55#define XSI_BANK1_R16 (XSI_BASE + XSI_BANK1_R16_OFS)
56#define XSI_BANKNUM (XSI_BASE + XSI_BANKNUM_OFS)
57#define XSI_IHA (XSI_BASE + XSI_IHA_OFS)
58#endif
59
60#ifndef __ASSEMBLY__
61
62/************************************************/
63/* Instructions paravirtualized for correctness */
64/************************************************/
65
66/* "fc" and "thash" are privilege-sensitive instructions, meaning they
67 * may have different semantics depending on whether they are executed
68 * at PL0 vs PL!=0. When paravirtualized, these instructions mustn't
69 * be allowed to execute directly, lest incorrect semantics result. */
70extern void xen_fc(unsigned long addr);
71extern unsigned long xen_thash(unsigned long addr);
72
73/* Note that "ttag" and "cover" are also privilege-sensitive; "ttag"
74 * is not currently used (though it may be in a long-format VHPT system!)
75 * and the semantics of cover only change if psr.ic is off which is very
76 * rare (and currently non-existent outside of assembly code */
77
78/* There are also privilege-sensitive registers. These registers are
79 * readable at any privilege level but only writable at PL0. */
80extern unsigned long xen_get_cpuid(int index);
81extern unsigned long xen_get_pmd(int index);
82
83extern unsigned long xen_get_eflag(void); /* see xen_ia64_getreg */
84extern void xen_set_eflag(unsigned long); /* see xen_ia64_setreg */
85
86/************************************************/
87/* Instructions paravirtualized for performance */
88/************************************************/
89
90/* Xen uses memory-mapped virtual privileged registers for access to many
91 * performance-sensitive privileged registers. Some, like the processor
92 * status register (psr), are broken up into multiple memory locations.
93 * Others, like "pend", are abstractions based on privileged registers.
94 * "Pend" is guaranteed to be set if reading cr.ivr would return a
95 * (non-spurious) interrupt. */
96#define XEN_MAPPEDREGS ((struct mapped_regs *)XMAPPEDREGS_BASE)
97
98#define XSI_PSR_I \
99 (*XEN_MAPPEDREGS->interrupt_mask_addr)
100#define xen_get_virtual_psr_i() \
101 (!XSI_PSR_I)
102#define xen_set_virtual_psr_i(_val) \
103 ({ XSI_PSR_I = (uint8_t)(_val) ? 0 : 1; })
104#define xen_set_virtual_psr_ic(_val) \
105 ({ XEN_MAPPEDREGS->interrupt_collection_enabled = _val ? 1 : 0; })
106#define xen_get_virtual_pend() \
107 (*(((uint8_t *)XEN_MAPPEDREGS->interrupt_mask_addr) - 1))
108
109/* Although all privileged operations can be left to trap and will
110 * be properly handled by Xen, some are frequent enough that we use
111 * hyperprivops for performance. */
112extern unsigned long xen_get_psr(void);
113extern unsigned long xen_get_ivr(void);
114extern unsigned long xen_get_tpr(void);
115extern void xen_hyper_ssm_i(void);
116extern void xen_set_itm(unsigned long);
117extern void xen_set_tpr(unsigned long);
118extern void xen_eoi(unsigned long);
119extern unsigned long xen_get_rr(unsigned long index);
120extern void xen_set_rr(unsigned long index, unsigned long val);
121extern void xen_set_rr0_to_rr4(unsigned long val0, unsigned long val1,
122 unsigned long val2, unsigned long val3,
123 unsigned long val4);
124extern void xen_set_kr(unsigned long index, unsigned long val);
125extern void xen_ptcga(unsigned long addr, unsigned long size);
126
127#endif /* !__ASSEMBLY__ */
128
129#endif /* _ASM_IA64_XEN_PRIVOP_H */
diff --git a/arch/ia64/include/asm/xen/xcom_hcall.h b/arch/ia64/include/asm/xen/xcom_hcall.h
new file mode 100644
index 000000000000..20b2950c71b6
--- /dev/null
+++ b/arch/ia64/include/asm/xen/xcom_hcall.h
@@ -0,0 +1,51 @@
1/*
2 * Copyright (C) 2006 Tristan Gingold <tristan.gingold@bull.net>, Bull SAS
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18
19#ifndef _ASM_IA64_XEN_XCOM_HCALL_H
20#define _ASM_IA64_XEN_XCOM_HCALL_H
21
22/* These function creates inline or mini descriptor for the parameters and
23 calls the corresponding xencomm_arch_hypercall_X.
24 Architectures should defines HYPERVISOR_xxx as xencomm_hypercall_xxx unless
25 they want to use their own wrapper. */
26extern int xencomm_hypercall_console_io(int cmd, int count, char *str);
27
28extern int xencomm_hypercall_event_channel_op(int cmd, void *op);
29
30extern int xencomm_hypercall_xen_version(int cmd, void *arg);
31
32extern int xencomm_hypercall_physdev_op(int cmd, void *op);
33
34extern int xencomm_hypercall_grant_table_op(unsigned int cmd, void *op,
35 unsigned int count);
36
37extern int xencomm_hypercall_sched_op(int cmd, void *arg);
38
39extern int xencomm_hypercall_multicall(void *call_list, int nr_calls);
40
41extern int xencomm_hypercall_callback_op(int cmd, void *arg);
42
43extern int xencomm_hypercall_memory_op(unsigned int cmd, void *arg);
44
45extern int xencomm_hypercall_suspend(unsigned long srec);
46
47extern long xencomm_hypercall_vcpu_op(int cmd, int cpu, void *arg);
48
49extern long xencomm_hypercall_opt_feature(void *arg);
50
51#endif /* _ASM_IA64_XEN_XCOM_HCALL_H */
diff --git a/arch/ia64/include/asm/xen/xencomm.h b/arch/ia64/include/asm/xen/xencomm.h
new file mode 100644
index 000000000000..cded677bebf2
--- /dev/null
+++ b/arch/ia64/include/asm/xen/xencomm.h
@@ -0,0 +1,42 @@
1/*
2 * Copyright (C) 2006 Hollis Blanchard <hollisb@us.ibm.com>, IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18
19#ifndef _ASM_IA64_XEN_XENCOMM_H
20#define _ASM_IA64_XEN_XENCOMM_H
21
22#include <xen/xencomm.h>
23#include <asm/pgtable.h>
24
25/* Must be called before any hypercall. */
26extern void xencomm_initialize(void);
27extern int xencomm_is_initialized(void);
28
29/* Check if virtual contiguity means physical contiguity
30 * where the passed address is a pointer value in virtual address.
31 * On ia64, identity mapping area in region 7 or the piece of region 5
32 * that is mapped by itr[IA64_TR_KERNEL]/dtr[IA64_TR_KERNEL]
33 */
34static inline int xencomm_is_phys_contiguous(unsigned long addr)
35{
36 return (PAGE_OFFSET <= addr &&
37 addr < (PAGE_OFFSET + (1UL << IA64_MAX_PHYS_BITS))) ||
38 (KERNEL_START <= addr &&
39 addr < KERNEL_START + KERNEL_TR_PAGE_SIZE);
40}
41
42#endif /* _ASM_IA64_XEN_XENCOMM_H */
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index 87fea11aecb7..c381ea954892 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -42,6 +42,10 @@ obj-$(CONFIG_IA64_ESI) += esi.o
42ifneq ($(CONFIG_IA64_ESI),) 42ifneq ($(CONFIG_IA64_ESI),)
43obj-y += esi_stub.o # must be in kernel proper 43obj-y += esi_stub.o # must be in kernel proper
44endif 44endif
45obj-$(CONFIG_DMAR) += pci-dma.o
46ifeq ($(CONFIG_DMAR), y)
47obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o
48endif
45 49
46# The gate DSO image is built using a special linker script. 50# The gate DSO image is built using a special linker script.
47targets += gate.so gate-syms.o 51targets += gate.so gate-syms.o
@@ -112,5 +116,23 @@ clean-files += $(objtree)/include/asm-ia64/nr-irqs.h
112ASM_PARAVIRT_OBJS = ivt.o entry.o 116ASM_PARAVIRT_OBJS = ivt.o entry.o
113define paravirtualized_native 117define paravirtualized_native
114AFLAGS_$(1) += -D__IA64_ASM_PARAVIRTUALIZED_NATIVE 118AFLAGS_$(1) += -D__IA64_ASM_PARAVIRTUALIZED_NATIVE
119AFLAGS_pvchk-sed-$(1) += -D__IA64_ASM_PARAVIRTUALIZED_PVCHECK
120extra-y += pvchk-$(1)
115endef 121endef
116$(foreach obj,$(ASM_PARAVIRT_OBJS),$(eval $(call paravirtualized_native,$(obj)))) 122$(foreach obj,$(ASM_PARAVIRT_OBJS),$(eval $(call paravirtualized_native,$(obj))))
123
124#
125# Checker for paravirtualizations of privileged operations.
126#
127quiet_cmd_pv_check_sed = PVCHK $@
128define cmd_pv_check_sed
129 sed -f $(srctree)/arch/$(SRCARCH)/scripts/pvcheck.sed $< > $@
130endef
131
132$(obj)/pvchk-sed-%.s: $(src)/%.S $(srctree)/arch/$(SRCARCH)/scripts/pvcheck.sed FORCE
133 $(call if_changed_dep,as_s_S)
134$(obj)/pvchk-%.s: $(obj)/pvchk-sed-%.s FORCE
135 $(call if_changed,pv_check_sed)
136$(obj)/pvchk-%.o: $(obj)/pvchk-%.s FORCE
137 $(call if_changed,as_o_S)
138.PRECIOUS: $(obj)/pvchk-sed-%.s $(obj)/pvchk-%.s $(obj)/pvchk-%.o
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 5d1eb7ee2bf6..0635015d0aaa 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -52,6 +52,7 @@
52#include <asm/numa.h> 52#include <asm/numa.h>
53#include <asm/sal.h> 53#include <asm/sal.h>
54#include <asm/cyclone.h> 54#include <asm/cyclone.h>
55#include <asm/xen/hypervisor.h>
55 56
56#define BAD_MADT_ENTRY(entry, end) ( \ 57#define BAD_MADT_ENTRY(entry, end) ( \
57 (!entry) || (unsigned long)entry + sizeof(*entry) > end || \ 58 (!entry) || (unsigned long)entry + sizeof(*entry) > end || \
@@ -91,6 +92,9 @@ acpi_get_sysname(void)
91 struct acpi_table_rsdp *rsdp; 92 struct acpi_table_rsdp *rsdp;
92 struct acpi_table_xsdt *xsdt; 93 struct acpi_table_xsdt *xsdt;
93 struct acpi_table_header *hdr; 94 struct acpi_table_header *hdr;
95#ifdef CONFIG_DMAR
96 u64 i, nentries;
97#endif
94 98
95 rsdp_phys = acpi_find_rsdp(); 99 rsdp_phys = acpi_find_rsdp();
96 if (!rsdp_phys) { 100 if (!rsdp_phys) {
@@ -121,7 +125,21 @@ acpi_get_sysname(void)
121 return "uv"; 125 return "uv";
122 else 126 else
123 return "sn2"; 127 return "sn2";
128 } else if (xen_pv_domain() && !strcmp(hdr->oem_id, "XEN")) {
129 return "xen";
130 }
131
132#ifdef CONFIG_DMAR
133 /* Look for Intel IOMMU */
134 nentries = (hdr->length - sizeof(*hdr)) /
135 sizeof(xsdt->table_offset_entry[0]);
136 for (i = 0; i < nentries; i++) {
137 hdr = __va(xsdt->table_offset_entry[i]);
138 if (strncmp(hdr->signature, ACPI_SIG_DMAR,
139 sizeof(ACPI_SIG_DMAR) - 1) == 0)
140 return "dig_vtd";
124 } 141 }
142#endif
125 143
126 return "dig"; 144 return "dig";
127#else 145#else
@@ -137,6 +155,10 @@ acpi_get_sysname(void)
137 return "uv"; 155 return "uv";
138# elif defined (CONFIG_IA64_DIG) 156# elif defined (CONFIG_IA64_DIG)
139 return "dig"; 157 return "dig";
158# elif defined (CONFIG_IA64_XEN_GUEST)
159 return "xen";
160# elif defined(CONFIG_IA64_DIG_VTD)
161 return "dig_vtd";
140# else 162# else
141# error Unknown platform. Fix acpi.c. 163# error Unknown platform. Fix acpi.c.
142# endif 164# endif
diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c
index 94c44b1ccfd0..742dbb1d5a4f 100644
--- a/arch/ia64/kernel/asm-offsets.c
+++ b/arch/ia64/kernel/asm-offsets.c
@@ -16,6 +16,9 @@
16#include <asm/sigcontext.h> 16#include <asm/sigcontext.h>
17#include <asm/mca.h> 17#include <asm/mca.h>
18 18
19#include <asm/xen/interface.h>
20#include <asm/xen/hypervisor.h>
21
19#include "../kernel/sigframe.h" 22#include "../kernel/sigframe.h"
20#include "../kernel/fsyscall_gtod_data.h" 23#include "../kernel/fsyscall_gtod_data.h"
21 24
@@ -286,4 +289,32 @@ void foo(void)
286 offsetof (struct itc_jitter_data_t, itc_jitter)); 289 offsetof (struct itc_jitter_data_t, itc_jitter));
287 DEFINE(IA64_ITC_LASTCYCLE_OFFSET, 290 DEFINE(IA64_ITC_LASTCYCLE_OFFSET,
288 offsetof (struct itc_jitter_data_t, itc_lastcycle)); 291 offsetof (struct itc_jitter_data_t, itc_lastcycle));
292
293#ifdef CONFIG_XEN
294 BLANK();
295
296 DEFINE(XEN_NATIVE_ASM, XEN_NATIVE);
297 DEFINE(XEN_PV_DOMAIN_ASM, XEN_PV_DOMAIN);
298
299#define DEFINE_MAPPED_REG_OFS(sym, field) \
300 DEFINE(sym, (XMAPPEDREGS_OFS + offsetof(struct mapped_regs, field)))
301
302 DEFINE_MAPPED_REG_OFS(XSI_PSR_I_ADDR_OFS, interrupt_mask_addr);
303 DEFINE_MAPPED_REG_OFS(XSI_IPSR_OFS, ipsr);
304 DEFINE_MAPPED_REG_OFS(XSI_IIP_OFS, iip);
305 DEFINE_MAPPED_REG_OFS(XSI_IFS_OFS, ifs);
306 DEFINE_MAPPED_REG_OFS(XSI_PRECOVER_IFS_OFS, precover_ifs);
307 DEFINE_MAPPED_REG_OFS(XSI_ISR_OFS, isr);
308 DEFINE_MAPPED_REG_OFS(XSI_IFA_OFS, ifa);
309 DEFINE_MAPPED_REG_OFS(XSI_IIPA_OFS, iipa);
310 DEFINE_MAPPED_REG_OFS(XSI_IIM_OFS, iim);
311 DEFINE_MAPPED_REG_OFS(XSI_IHA_OFS, iha);
312 DEFINE_MAPPED_REG_OFS(XSI_ITIR_OFS, itir);
313 DEFINE_MAPPED_REG_OFS(XSI_PSR_IC_OFS, interrupt_collection_enabled);
314 DEFINE_MAPPED_REG_OFS(XSI_BANKNUM_OFS, banknum);
315 DEFINE_MAPPED_REG_OFS(XSI_BANK0_R16_OFS, bank0_regs[0]);
316 DEFINE_MAPPED_REG_OFS(XSI_BANK1_R16_OFS, bank1_regs[0]);
317 DEFINE_MAPPED_REG_OFS(XSI_B0NATS_OFS, vbnat);
318 DEFINE_MAPPED_REG_OFS(XSI_B1NATS_OFS, vnat);
319#endif /* CONFIG_XEN */
289} 320}
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 0dd6c1419d8d..7ef0c594f5ed 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -534,6 +534,11 @@ GLOBAL_ENTRY(ia64_trace_syscall)
534 stf.spill [r16]=f10 534 stf.spill [r16]=f10
535 stf.spill [r17]=f11 535 stf.spill [r17]=f11
536 br.call.sptk.many rp=syscall_trace_enter // give parent a chance to catch syscall args 536 br.call.sptk.many rp=syscall_trace_enter // give parent a chance to catch syscall args
537 cmp.lt p6,p0=r8,r0 // check tracehook
538 adds r2=PT(R8)+16,sp // r2 = &pt_regs.r8
539 adds r3=PT(R10)+16,sp // r3 = &pt_regs.r10
540 mov r10=0
541(p6) br.cond.sptk strace_error // syscall failed ->
537 adds r16=PT(F6)+16,sp 542 adds r16=PT(F6)+16,sp
538 adds r17=PT(F7)+16,sp 543 adds r17=PT(F7)+16,sp
539 ;; 544 ;;
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S
index 416a952b19bd..f675d8e33853 100644
--- a/arch/ia64/kernel/ivt.S
+++ b/arch/ia64/kernel/ivt.S
@@ -580,7 +580,7 @@ ENTRY(dirty_bit)
580 mov b0=r29 // restore b0 580 mov b0=r29 // restore b0
581 ;; 581 ;;
582 st8 [r17]=r18 // store back updated PTE 582 st8 [r17]=r18 // store back updated PTE
583 itc.d r18 // install updated PTE 583 ITC_D(p0, r18, r16) // install updated PTE
584#endif 584#endif
585 mov pr=r31,-1 // restore pr 585 mov pr=r31,-1 // restore pr
586 RFI 586 RFI
@@ -646,7 +646,7 @@ ENTRY(iaccess_bit)
646 mov b0=r29 // restore b0 646 mov b0=r29 // restore b0
647 ;; 647 ;;
648 st8 [r17]=r18 // store back updated PTE 648 st8 [r17]=r18 // store back updated PTE
649 itc.i r18 // install updated PTE 649 ITC_I(p0, r18, r16) // install updated PTE
650#endif /* !CONFIG_SMP */ 650#endif /* !CONFIG_SMP */
651 mov pr=r31,-1 651 mov pr=r31,-1
652 RFI 652 RFI
@@ -698,7 +698,7 @@ ENTRY(daccess_bit)
698 or r18=_PAGE_A,r18 // set the accessed bit 698 or r18=_PAGE_A,r18 // set the accessed bit
699 ;; 699 ;;
700 st8 [r17]=r18 // store back updated PTE 700 st8 [r17]=r18 // store back updated PTE
701 itc.d r18 // install updated PTE 701 ITC_D(p0, r18, r16) // install updated PTE
702#endif 702#endif
703 mov b0=r29 // restore b0 703 mov b0=r29 // restore b0
704 mov pr=r31,-1 704 mov pr=r31,-1
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c
index 60c6ef67ebb2..702a09c13238 100644
--- a/arch/ia64/kernel/msi_ia64.c
+++ b/arch/ia64/kernel/msi_ia64.c
@@ -5,6 +5,7 @@
5#include <linux/pci.h> 5#include <linux/pci.h>
6#include <linux/irq.h> 6#include <linux/irq.h>
7#include <linux/msi.h> 7#include <linux/msi.h>
8#include <linux/dmar.h>
8#include <asm/smp.h> 9#include <asm/smp.h>
9 10
10/* 11/*
@@ -162,3 +163,82 @@ void arch_teardown_msi_irq(unsigned int irq)
162 163
163 return ia64_teardown_msi_irq(irq); 164 return ia64_teardown_msi_irq(irq);
164} 165}
166
167#ifdef CONFIG_DMAR
168#ifdef CONFIG_SMP
169static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
170{
171 struct irq_cfg *cfg = irq_cfg + irq;
172 struct msi_msg msg;
173 int cpu = first_cpu(mask);
174
175
176 if (!cpu_online(cpu))
177 return;
178
179 if (irq_prepare_move(irq, cpu))
180 return;
181
182 dmar_msi_read(irq, &msg);
183
184 msg.data &= ~MSI_DATA_VECTOR_MASK;
185 msg.data |= MSI_DATA_VECTOR(cfg->vector);
186 msg.address_lo &= ~MSI_ADDR_DESTID_MASK;
187 msg.address_lo |= MSI_ADDR_DESTID_CPU(cpu_physical_id(cpu));
188
189 dmar_msi_write(irq, &msg);
190 irq_desc[irq].affinity = mask;
191}
192#endif /* CONFIG_SMP */
193
194struct irq_chip dmar_msi_type = {
195 .name = "DMAR_MSI",
196 .unmask = dmar_msi_unmask,
197 .mask = dmar_msi_mask,
198 .ack = ia64_ack_msi_irq,
199#ifdef CONFIG_SMP
200 .set_affinity = dmar_msi_set_affinity,
201#endif
202 .retrigger = ia64_msi_retrigger_irq,
203};
204
205static int
206msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
207{
208 struct irq_cfg *cfg = irq_cfg + irq;
209 unsigned dest;
210 cpumask_t mask;
211
212 cpus_and(mask, irq_to_domain(irq), cpu_online_map);
213 dest = cpu_physical_id(first_cpu(mask));
214
215 msg->address_hi = 0;
216 msg->address_lo =
217 MSI_ADDR_HEADER |
218 MSI_ADDR_DESTMODE_PHYS |
219 MSI_ADDR_REDIRECTION_CPU |
220 MSI_ADDR_DESTID_CPU(dest);
221
222 msg->data =
223 MSI_DATA_TRIGGER_EDGE |
224 MSI_DATA_LEVEL_ASSERT |
225 MSI_DATA_DELIVERY_FIXED |
226 MSI_DATA_VECTOR(cfg->vector);
227 return 0;
228}
229
230int arch_setup_dmar_msi(unsigned int irq)
231{
232 int ret;
233 struct msi_msg msg;
234
235 ret = msi_compose_msg(NULL, irq, &msg);
236 if (ret < 0)
237 return ret;
238 dmar_msi_write(irq, &msg);
239 set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
240 "edge");
241 return 0;
242}
243#endif /* CONFIG_DMAR */
244
diff --git a/arch/ia64/kernel/nr-irqs.c b/arch/ia64/kernel/nr-irqs.c
index 8273afc32db8..ee564575148e 100644
--- a/arch/ia64/kernel/nr-irqs.c
+++ b/arch/ia64/kernel/nr-irqs.c
@@ -10,6 +10,7 @@
10#include <linux/kbuild.h> 10#include <linux/kbuild.h>
11#include <linux/threads.h> 11#include <linux/threads.h>
12#include <asm/native/irq.h> 12#include <asm/native/irq.h>
13#include <asm/xen/irq.h>
13 14
14void foo(void) 15void foo(void)
15{ 16{
diff --git a/arch/ia64/kernel/paravirt.c b/arch/ia64/kernel/paravirt.c
index afaf5b9a2cf0..de35d8e8b7d2 100644
--- a/arch/ia64/kernel/paravirt.c
+++ b/arch/ia64/kernel/paravirt.c
@@ -332,7 +332,7 @@ ia64_native_iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val)
332 332
333struct pv_iosapic_ops pv_iosapic_ops = { 333struct pv_iosapic_ops pv_iosapic_ops = {
334 .pcat_compat_init = ia64_native_iosapic_pcat_compat_init, 334 .pcat_compat_init = ia64_native_iosapic_pcat_compat_init,
335 .get_irq_chip = ia64_native_iosapic_get_irq_chip, 335 .__get_irq_chip = ia64_native_iosapic_get_irq_chip,
336 336
337 .__read = ia64_native_iosapic_read, 337 .__read = ia64_native_iosapic_read,
338 .__write = ia64_native_iosapic_write, 338 .__write = ia64_native_iosapic_write,
diff --git a/arch/ia64/kernel/paravirt_inst.h b/arch/ia64/kernel/paravirt_inst.h
index 5cad6fb2ed19..64d6d810c64b 100644
--- a/arch/ia64/kernel/paravirt_inst.h
+++ b/arch/ia64/kernel/paravirt_inst.h
@@ -20,7 +20,9 @@
20 * 20 *
21 */ 21 */
22 22
23#ifdef __IA64_ASM_PARAVIRTUALIZED_XEN 23#ifdef __IA64_ASM_PARAVIRTUALIZED_PVCHECK
24#include <asm/native/pvchk_inst.h>
25#elif defined(__IA64_ASM_PARAVIRTUALIZED_XEN)
24#include <asm/xen/inst.h> 26#include <asm/xen/inst.h>
25#include <asm/xen/minstate.h> 27#include <asm/xen/minstate.h>
26#else 28#else
diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c
new file mode 100644
index 000000000000..10a75b557650
--- /dev/null
+++ b/arch/ia64/kernel/pci-dma.c
@@ -0,0 +1,129 @@
1/*
2 * Dynamic DMA mapping support.
3 */
4
5#include <linux/types.h>
6#include <linux/mm.h>
7#include <linux/string.h>
8#include <linux/pci.h>
9#include <linux/module.h>
10#include <linux/dmar.h>
11#include <asm/iommu.h>
12#include <asm/machvec.h>
13#include <linux/dma-mapping.h>
14
15#include <asm/machvec.h>
16#include <asm/system.h>
17
18#ifdef CONFIG_DMAR
19
20#include <linux/kernel.h>
21#include <linux/string.h>
22
23#include <asm/page.h>
24#include <asm/iommu.h>
25
26dma_addr_t bad_dma_address __read_mostly;
27EXPORT_SYMBOL(bad_dma_address);
28
29static int iommu_sac_force __read_mostly;
30
31int no_iommu __read_mostly;
32#ifdef CONFIG_IOMMU_DEBUG
33int force_iommu __read_mostly = 1;
34#else
35int force_iommu __read_mostly;
36#endif
37
38/* Set this to 1 if there is a HW IOMMU in the system */
39int iommu_detected __read_mostly;
40
41/* Dummy device used for NULL arguments (normally ISA). Better would
42 be probably a smaller DMA mask, but this is bug-to-bug compatible
43 to i386. */
44struct device fallback_dev = {
45 .bus_id = "fallback device",
46 .coherent_dma_mask = DMA_32BIT_MASK,
47 .dma_mask = &fallback_dev.coherent_dma_mask,
48};
49
50void __init pci_iommu_alloc(void)
51{
52 /*
53 * The order of these functions is important for
54 * fall-back/fail-over reasons
55 */
56 detect_intel_iommu();
57
58#ifdef CONFIG_SWIOTLB
59 pci_swiotlb_init();
60#endif
61}
62
63static int __init pci_iommu_init(void)
64{
65 if (iommu_detected)
66 intel_iommu_init();
67
68 return 0;
69}
70
71/* Must execute after PCI subsystem */
72fs_initcall(pci_iommu_init);
73
74void pci_iommu_shutdown(void)
75{
76 return;
77}
78
79void __init
80iommu_dma_init(void)
81{
82 return;
83}
84
85struct dma_mapping_ops *dma_ops;
86EXPORT_SYMBOL(dma_ops);
87
88int iommu_dma_supported(struct device *dev, u64 mask)
89{
90 struct dma_mapping_ops *ops = get_dma_ops(dev);
91
92#ifdef CONFIG_PCI
93 if (mask > 0xffffffff && forbid_dac > 0) {
94 dev_info(dev, "Disallowing DAC for device\n");
95 return 0;
96 }
97#endif
98
99 if (ops->dma_supported_op)
100 return ops->dma_supported_op(dev, mask);
101
102 /* Copied from i386. Doesn't make much sense, because it will
103 only work for pci_alloc_coherent.
104 The caller just has to use GFP_DMA in this case. */
105 if (mask < DMA_24BIT_MASK)
106 return 0;
107
108 /* Tell the device to use SAC when IOMMU force is on. This
109 allows the driver to use cheaper accesses in some cases.
110
111 Problem with this is that if we overflow the IOMMU area and
112 return DAC as fallback address the device may not handle it
113 correctly.
114
115 As a special case some controllers have a 39bit address
116 mode that is as efficient as 32bit (aic79xx). Don't force
117 SAC for these. Assume all masks <= 40 bits are of this
118 type. Normally this doesn't make any difference, but gives
119 more gentle handling of IOMMU overflow. */
120 if (iommu_sac_force && (mask >= DMA_40BIT_MASK)) {
121 dev_info(dev, "Force SAC with mask %lx\n", mask);
122 return 0;
123 }
124
125 return 1;
126}
127EXPORT_SYMBOL(iommu_dma_supported);
128
129#endif
diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c
new file mode 100644
index 000000000000..16c50516dbc1
--- /dev/null
+++ b/arch/ia64/kernel/pci-swiotlb.c
@@ -0,0 +1,46 @@
1/* Glue code to lib/swiotlb.c */
2
3#include <linux/pci.h>
4#include <linux/cache.h>
5#include <linux/module.h>
6#include <linux/dma-mapping.h>
7
8#include <asm/swiotlb.h>
9#include <asm/dma.h>
10#include <asm/iommu.h>
11#include <asm/machvec.h>
12
13int swiotlb __read_mostly;
14EXPORT_SYMBOL(swiotlb);
15
16struct dma_mapping_ops swiotlb_dma_ops = {
17 .mapping_error = swiotlb_dma_mapping_error,
18 .alloc_coherent = swiotlb_alloc_coherent,
19 .free_coherent = swiotlb_free_coherent,
20 .map_single = swiotlb_map_single,
21 .unmap_single = swiotlb_unmap_single,
22 .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
23 .sync_single_for_device = swiotlb_sync_single_for_device,
24 .sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
25 .sync_single_range_for_device = swiotlb_sync_single_range_for_device,
26 .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
27 .sync_sg_for_device = swiotlb_sync_sg_for_device,
28 .map_sg = swiotlb_map_sg,
29 .unmap_sg = swiotlb_unmap_sg,
30 .dma_supported_op = swiotlb_dma_supported,
31};
32
33void __init pci_swiotlb_init(void)
34{
35 if (!iommu_detected) {
36#ifdef CONFIG_IA64_GENERIC
37 swiotlb = 1;
38 printk(KERN_INFO "PCI-DMA: Re-initialize machine vector.\n");
39 machvec_init("dig");
40 swiotlb_init();
41 dma_ops = &swiotlb_dma_ops;
42#else
43 panic("Unable to find Intel IOMMU");
44#endif
45 }
46}
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index fc8f3509df27..ada4605d1223 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -40,6 +40,7 @@
40#include <linux/capability.h> 40#include <linux/capability.h>
41#include <linux/rcupdate.h> 41#include <linux/rcupdate.h>
42#include <linux/completion.h> 42#include <linux/completion.h>
43#include <linux/tracehook.h>
43 44
44#include <asm/errno.h> 45#include <asm/errno.h>
45#include <asm/intrinsics.h> 46#include <asm/intrinsics.h>
@@ -3684,7 +3685,7 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3684 3685
3685 PFM_SET_WORK_PENDING(task, 1); 3686 PFM_SET_WORK_PENDING(task, 1);
3686 3687
3687 tsk_set_notify_resume(task); 3688 set_notify_resume(task);
3688 3689
3689 /* 3690 /*
3690 * XXX: send reschedule if task runs on another CPU 3691 * XXX: send reschedule if task runs on another CPU
@@ -5044,8 +5045,6 @@ pfm_handle_work(void)
5044 5045
5045 PFM_SET_WORK_PENDING(current, 0); 5046 PFM_SET_WORK_PENDING(current, 0);
5046 5047
5047 tsk_clear_notify_resume(current);
5048
5049 regs = task_pt_regs(current); 5048 regs = task_pt_regs(current);
5050 5049
5051 /* 5050 /*
@@ -5414,7 +5413,7 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str
5414 * when coming from ctxsw, current still points to the 5413 * when coming from ctxsw, current still points to the
5415 * previous task, therefore we must work with task and not current. 5414 * previous task, therefore we must work with task and not current.
5416 */ 5415 */
5417 tsk_set_notify_resume(task); 5416 set_notify_resume(task);
5418 } 5417 }
5419 /* 5418 /*
5420 * defer until state is changed (shorten spin window). the context is locked 5419 * defer until state is changed (shorten spin window). the context is locked
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 3ab8373103ec..c57162705147 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -28,6 +28,7 @@
28#include <linux/delay.h> 28#include <linux/delay.h>
29#include <linux/kdebug.h> 29#include <linux/kdebug.h>
30#include <linux/utsname.h> 30#include <linux/utsname.h>
31#include <linux/tracehook.h>
31 32
32#include <asm/cpu.h> 33#include <asm/cpu.h>
33#include <asm/delay.h> 34#include <asm/delay.h>
@@ -160,21 +161,6 @@ show_regs (struct pt_regs *regs)
160 show_stack(NULL, NULL); 161 show_stack(NULL, NULL);
161} 162}
162 163
163void tsk_clear_notify_resume(struct task_struct *tsk)
164{
165#ifdef CONFIG_PERFMON
166 if (tsk->thread.pfm_needs_checking)
167 return;
168#endif
169 if (test_ti_thread_flag(task_thread_info(tsk), TIF_RESTORE_RSE))
170 return;
171 clear_ti_thread_flag(task_thread_info(tsk), TIF_NOTIFY_RESUME);
172}
173
174/*
175 * do_notify_resume_user():
176 * Called from notify_resume_user at entry.S, with interrupts disabled.
177 */
178void 164void
179do_notify_resume_user(sigset_t *unused, struct sigscratch *scr, long in_syscall) 165do_notify_resume_user(sigset_t *unused, struct sigscratch *scr, long in_syscall)
180{ 166{
@@ -203,6 +189,11 @@ do_notify_resume_user(sigset_t *unused, struct sigscratch *scr, long in_syscall)
203 ia64_do_signal(scr, in_syscall); 189 ia64_do_signal(scr, in_syscall);
204 } 190 }
205 191
192 if (test_thread_flag(TIF_NOTIFY_RESUME)) {
193 clear_thread_flag(TIF_NOTIFY_RESUME);
194 tracehook_notify_resume(&scr->pt);
195 }
196
206 /* copy user rbs to kernel rbs */ 197 /* copy user rbs to kernel rbs */
207 if (unlikely(test_thread_flag(TIF_RESTORE_RSE))) { 198 if (unlikely(test_thread_flag(TIF_RESTORE_RSE))) {
208 local_irq_enable(); /* force interrupt enable */ 199 local_irq_enable(); /* force interrupt enable */
@@ -251,7 +242,6 @@ default_idle (void)
251/* We don't actually take CPU down, just spin without interrupts. */ 242/* We don't actually take CPU down, just spin without interrupts. */
252static inline void play_dead(void) 243static inline void play_dead(void)
253{ 244{
254 extern void ia64_cpu_local_tick (void);
255 unsigned int this_cpu = smp_processor_id(); 245 unsigned int this_cpu = smp_processor_id();
256 246
257 /* Ack it */ 247 /* Ack it */
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index 2a9943b5947f..92c9689b7d97 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -22,6 +22,7 @@
22#include <linux/signal.h> 22#include <linux/signal.h>
23#include <linux/regset.h> 23#include <linux/regset.h>
24#include <linux/elf.h> 24#include <linux/elf.h>
25#include <linux/tracehook.h>
25 26
26#include <asm/pgtable.h> 27#include <asm/pgtable.h>
27#include <asm/processor.h> 28#include <asm/processor.h>
@@ -603,7 +604,7 @@ void ia64_ptrace_stop(void)
603{ 604{
604 if (test_and_set_tsk_thread_flag(current, TIF_RESTORE_RSE)) 605 if (test_and_set_tsk_thread_flag(current, TIF_RESTORE_RSE))
605 return; 606 return;
606 tsk_set_notify_resume(current); 607 set_notify_resume(current);
607 unw_init_running(do_sync_rbs, ia64_sync_user_rbs); 608 unw_init_running(do_sync_rbs, ia64_sync_user_rbs);
608} 609}
609 610
@@ -613,7 +614,6 @@ void ia64_ptrace_stop(void)
613void ia64_sync_krbs(void) 614void ia64_sync_krbs(void)
614{ 615{
615 clear_tsk_thread_flag(current, TIF_RESTORE_RSE); 616 clear_tsk_thread_flag(current, TIF_RESTORE_RSE);
616 tsk_clear_notify_resume(current);
617 617
618 unw_init_running(do_sync_rbs, ia64_sync_kernel_rbs); 618 unw_init_running(do_sync_rbs, ia64_sync_kernel_rbs);
619} 619}
@@ -644,7 +644,7 @@ ptrace_attach_sync_user_rbs (struct task_struct *child)
644 spin_lock_irq(&child->sighand->siglock); 644 spin_lock_irq(&child->sighand->siglock);
645 if (child->state == TASK_STOPPED && 645 if (child->state == TASK_STOPPED &&
646 !test_and_set_tsk_thread_flag(child, TIF_RESTORE_RSE)) { 646 !test_and_set_tsk_thread_flag(child, TIF_RESTORE_RSE)) {
647 tsk_set_notify_resume(child); 647 set_notify_resume(child);
648 648
649 child->state = TASK_TRACED; 649 child->state = TASK_TRACED;
650 stopped = 1; 650 stopped = 1;
@@ -1232,37 +1232,16 @@ arch_ptrace (struct task_struct *child, long request, long addr, long data)
1232} 1232}
1233 1233
1234 1234
1235static void
1236syscall_trace (void)
1237{
1238 /*
1239 * The 0x80 provides a way for the tracing parent to
1240 * distinguish between a syscall stop and SIGTRAP delivery.
1241 */
1242 ptrace_notify(SIGTRAP
1243 | ((current->ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));
1244
1245 /*
1246 * This isn't the same as continuing with a signal, but it
1247 * will do for normal use. strace only continues with a
1248 * signal if the stopping signal is not SIGTRAP. -brl
1249 */
1250 if (current->exit_code) {
1251 send_sig(current->exit_code, current, 1);
1252 current->exit_code = 0;
1253 }
1254}
1255
1256/* "asmlinkage" so the input arguments are preserved... */ 1235/* "asmlinkage" so the input arguments are preserved... */
1257 1236
1258asmlinkage void 1237asmlinkage long
1259syscall_trace_enter (long arg0, long arg1, long arg2, long arg3, 1238syscall_trace_enter (long arg0, long arg1, long arg2, long arg3,
1260 long arg4, long arg5, long arg6, long arg7, 1239 long arg4, long arg5, long arg6, long arg7,
1261 struct pt_regs regs) 1240 struct pt_regs regs)
1262{ 1241{
1263 if (test_thread_flag(TIF_SYSCALL_TRACE) 1242 if (test_thread_flag(TIF_SYSCALL_TRACE))
1264 && (current->ptrace & PT_PTRACED)) 1243 if (tracehook_report_syscall_entry(&regs))
1265 syscall_trace(); 1244 return -ENOSYS;
1266 1245
1267 /* copy user rbs to kernel rbs */ 1246 /* copy user rbs to kernel rbs */
1268 if (test_thread_flag(TIF_RESTORE_RSE)) 1247 if (test_thread_flag(TIF_RESTORE_RSE))
@@ -1283,6 +1262,7 @@ syscall_trace_enter (long arg0, long arg1, long arg2, long arg3,
1283 audit_syscall_entry(arch, syscall, arg0, arg1, arg2, arg3); 1262 audit_syscall_entry(arch, syscall, arg0, arg1, arg2, arg3);
1284 } 1263 }
1285 1264
1265 return 0;
1286} 1266}
1287 1267
1288/* "asmlinkage" so the input arguments are preserved... */ 1268/* "asmlinkage" so the input arguments are preserved... */
@@ -1292,6 +1272,8 @@ syscall_trace_leave (long arg0, long arg1, long arg2, long arg3,
1292 long arg4, long arg5, long arg6, long arg7, 1272 long arg4, long arg5, long arg6, long arg7,
1293 struct pt_regs regs) 1273 struct pt_regs regs)
1294{ 1274{
1275 int step;
1276
1295 if (unlikely(current->audit_context)) { 1277 if (unlikely(current->audit_context)) {
1296 int success = AUDITSC_RESULT(regs.r10); 1278 int success = AUDITSC_RESULT(regs.r10);
1297 long result = regs.r8; 1279 long result = regs.r8;
@@ -1301,10 +1283,9 @@ syscall_trace_leave (long arg0, long arg1, long arg2, long arg3,
1301 audit_syscall_exit(success, result); 1283 audit_syscall_exit(success, result);
1302 } 1284 }
1303 1285
1304 if ((test_thread_flag(TIF_SYSCALL_TRACE) 1286 step = test_thread_flag(TIF_SINGLESTEP);
1305 || test_thread_flag(TIF_SINGLESTEP)) 1287 if (step || test_thread_flag(TIF_SYSCALL_TRACE))
1306 && (current->ptrace & PT_PTRACED)) 1288 tracehook_report_syscall_exit(&regs, step);
1307 syscall_trace();
1308 1289
1309 /* copy user rbs to kernel rbs */ 1290 /* copy user rbs to kernel rbs */
1310 if (test_thread_flag(TIF_RESTORE_RSE)) 1291 if (test_thread_flag(TIF_RESTORE_RSE))
@@ -1940,7 +1921,7 @@ gpregs_writeback(struct task_struct *target,
1940{ 1921{
1941 if (test_and_set_tsk_thread_flag(target, TIF_RESTORE_RSE)) 1922 if (test_and_set_tsk_thread_flag(target, TIF_RESTORE_RSE))
1942 return 0; 1923 return 0;
1943 tsk_set_notify_resume(target); 1924 set_notify_resume(target);
1944 return do_regset_call(do_gpregs_writeback, target, regset, 0, 0, 1925 return do_regset_call(do_gpregs_writeback, target, regset, 0, 0,
1945 NULL, NULL); 1926 NULL, NULL);
1946} 1927}
@@ -2199,3 +2180,68 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *tsk)
2199#endif 2180#endif
2200 return &user_ia64_view; 2181 return &user_ia64_view;
2201} 2182}
2183
2184struct syscall_get_set_args {
2185 unsigned int i;
2186 unsigned int n;
2187 unsigned long *args;
2188 struct pt_regs *regs;
2189 int rw;
2190};
2191
2192static void syscall_get_set_args_cb(struct unw_frame_info *info, void *data)
2193{
2194 struct syscall_get_set_args *args = data;
2195 struct pt_regs *pt = args->regs;
2196 unsigned long *krbs, cfm, ndirty;
2197 int i, count;
2198
2199 if (unw_unwind_to_user(info) < 0)
2200 return;
2201
2202 cfm = pt->cr_ifs;
2203 krbs = (unsigned long *)info->task + IA64_RBS_OFFSET/8;
2204 ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));
2205
2206 count = 0;
2207 if (in_syscall(pt))
2208 count = min_t(int, args->n, cfm & 0x7f);
2209
2210 for (i = 0; i < count; i++) {
2211 if (args->rw)
2212 *ia64_rse_skip_regs(krbs, ndirty + i + args->i) =
2213 args->args[i];
2214 else
2215 args->args[i] = *ia64_rse_skip_regs(krbs,
2216 ndirty + i + args->i);
2217 }
2218
2219 if (!args->rw) {
2220 while (i < args->n) {
2221 args->args[i] = 0;
2222 i++;
2223 }
2224 }
2225}
2226
2227void ia64_syscall_get_set_arguments(struct task_struct *task,
2228 struct pt_regs *regs, unsigned int i, unsigned int n,
2229 unsigned long *args, int rw)
2230{
2231 struct syscall_get_set_args data = {
2232 .i = i,
2233 .n = n,
2234 .args = args,
2235 .regs = regs,
2236 .rw = rw,
2237 };
2238
2239 if (task == current)
2240 unw_init_running(syscall_get_set_args_cb, &data);
2241 else {
2242 struct unw_frame_info ufi;
2243 memset(&ufi, 0, sizeof(ufi));
2244 unw_init_from_blocked_task(&ufi, task);
2245 syscall_get_set_args_cb(&ufi, &data);
2246 }
2247}
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 916ba898237f..ae7911702bf8 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -116,6 +116,13 @@ unsigned int num_io_spaces;
116 */ 116 */
117#define I_CACHE_STRIDE_SHIFT 5 /* Safest way to go: 32 bytes by 32 bytes */ 117#define I_CACHE_STRIDE_SHIFT 5 /* Safest way to go: 32 bytes by 32 bytes */
118unsigned long ia64_i_cache_stride_shift = ~0; 118unsigned long ia64_i_cache_stride_shift = ~0;
119/*
120 * "clflush_cache_range()" needs to know what processor dependent stride size to
121 * use when it flushes cache lines including both d-cache and i-cache.
122 */
123/* Safest way to go: 32 bytes by 32 bytes */
124#define CACHE_STRIDE_SHIFT 5
125unsigned long ia64_cache_stride_shift = ~0;
119 126
120/* 127/*
121 * The merge_mask variable needs to be set to (max(iommu_page_size(iommu)) - 1). This 128 * The merge_mask variable needs to be set to (max(iommu_page_size(iommu)) - 1). This
@@ -852,13 +859,14 @@ setup_per_cpu_areas (void)
852} 859}
853 860
854/* 861/*
855 * Calculate the max. cache line size. 862 * Do the following calculations:
856 * 863 *
857 * In addition, the minimum of the i-cache stride sizes is calculated for 864 * 1. the max. cache line size.
858 * "flush_icache_range()". 865 * 2. the minimum of the i-cache stride sizes for "flush_icache_range()".
866 * 3. the minimum of the cache stride sizes for "clflush_cache_range()".
859 */ 867 */
860static void __cpuinit 868static void __cpuinit
861get_max_cacheline_size (void) 869get_cache_info(void)
862{ 870{
863 unsigned long line_size, max = 1; 871 unsigned long line_size, max = 1;
864 u64 l, levels, unique_caches; 872 u64 l, levels, unique_caches;
@@ -872,12 +880,14 @@ get_max_cacheline_size (void)
872 max = SMP_CACHE_BYTES; 880 max = SMP_CACHE_BYTES;
873 /* Safest setup for "flush_icache_range()" */ 881 /* Safest setup for "flush_icache_range()" */
874 ia64_i_cache_stride_shift = I_CACHE_STRIDE_SHIFT; 882 ia64_i_cache_stride_shift = I_CACHE_STRIDE_SHIFT;
883 /* Safest setup for "clflush_cache_range()" */
884 ia64_cache_stride_shift = CACHE_STRIDE_SHIFT;
875 goto out; 885 goto out;
876 } 886 }
877 887
878 for (l = 0; l < levels; ++l) { 888 for (l = 0; l < levels; ++l) {
879 status = ia64_pal_cache_config_info(l, /* cache_type (data_or_unified)= */ 2, 889 /* cache_type (data_or_unified)=2 */
880 &cci); 890 status = ia64_pal_cache_config_info(l, 2, &cci);
881 if (status != 0) { 891 if (status != 0) {
882 printk(KERN_ERR 892 printk(KERN_ERR
883 "%s: ia64_pal_cache_config_info(l=%lu, 2) failed (status=%ld)\n", 893 "%s: ia64_pal_cache_config_info(l=%lu, 2) failed (status=%ld)\n",
@@ -885,15 +895,21 @@ get_max_cacheline_size (void)
885 max = SMP_CACHE_BYTES; 895 max = SMP_CACHE_BYTES;
886 /* The safest setup for "flush_icache_range()" */ 896 /* The safest setup for "flush_icache_range()" */
887 cci.pcci_stride = I_CACHE_STRIDE_SHIFT; 897 cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
898 /* The safest setup for "clflush_cache_range()" */
899 ia64_cache_stride_shift = CACHE_STRIDE_SHIFT;
888 cci.pcci_unified = 1; 900 cci.pcci_unified = 1;
901 } else {
902 if (cci.pcci_stride < ia64_cache_stride_shift)
903 ia64_cache_stride_shift = cci.pcci_stride;
904
905 line_size = 1 << cci.pcci_line_size;
906 if (line_size > max)
907 max = line_size;
889 } 908 }
890 line_size = 1 << cci.pcci_line_size; 909
891 if (line_size > max)
892 max = line_size;
893 if (!cci.pcci_unified) { 910 if (!cci.pcci_unified) {
894 status = ia64_pal_cache_config_info(l, 911 /* cache_type (instruction)=1*/
895 /* cache_type (instruction)= */ 1, 912 status = ia64_pal_cache_config_info(l, 1, &cci);
896 &cci);
897 if (status != 0) { 913 if (status != 0) {
898 printk(KERN_ERR 914 printk(KERN_ERR
899 "%s: ia64_pal_cache_config_info(l=%lu, 1) failed (status=%ld)\n", 915 "%s: ia64_pal_cache_config_info(l=%lu, 1) failed (status=%ld)\n",
@@ -947,7 +963,7 @@ cpu_init (void)
947 } 963 }
948#endif 964#endif
949 965
950 get_max_cacheline_size(); 966 get_cache_info();
951 967
952 /* 968 /*
953 * We can't pass "local_cpu_data" to identify_cpu() because we haven't called 969 * We can't pass "local_cpu_data" to identify_cpu() because we haven't called
diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
index 19c5a78636fc..e12500a9c443 100644
--- a/arch/ia64/kernel/signal.c
+++ b/arch/ia64/kernel/signal.c
@@ -11,6 +11,7 @@
11#include <linux/kernel.h> 11#include <linux/kernel.h>
12#include <linux/mm.h> 12#include <linux/mm.h>
13#include <linux/ptrace.h> 13#include <linux/ptrace.h>
14#include <linux/tracehook.h>
14#include <linux/sched.h> 15#include <linux/sched.h>
15#include <linux/signal.h> 16#include <linux/signal.h>
16#include <linux/smp.h> 17#include <linux/smp.h>
@@ -439,6 +440,13 @@ handle_signal (unsigned long sig, struct k_sigaction *ka, siginfo_t *info, sigse
439 sigaddset(&current->blocked, sig); 440 sigaddset(&current->blocked, sig);
440 recalc_sigpending(); 441 recalc_sigpending();
441 spin_unlock_irq(&current->sighand->siglock); 442 spin_unlock_irq(&current->sighand->siglock);
443
444 /*
445 * Let tracing know that we've done the handler setup.
446 */
447 tracehook_signal_handler(sig, info, ka, &scr->pt,
448 test_thread_flag(TIF_SINGLESTEP));
449
442 return 1; 450 return 1;
443} 451}
444 452
diff --git a/arch/ia64/lib/flush.S b/arch/ia64/lib/flush.S
index 2a0d27f2f21b..1d8c88860063 100644
--- a/arch/ia64/lib/flush.S
+++ b/arch/ia64/lib/flush.S
@@ -60,3 +60,58 @@ GLOBAL_ENTRY(flush_icache_range)
60 mov ar.lc=r3 // restore ar.lc 60 mov ar.lc=r3 // restore ar.lc
61 br.ret.sptk.many rp 61 br.ret.sptk.many rp
62END(flush_icache_range) 62END(flush_icache_range)
63
64 /*
65 * clflush_cache_range(start,size)
66 *
67 * Flush cache lines from start to start+size-1.
68 *
69 * Must deal with range from start to start+size-1 but nothing else
70 * (need to be careful not to touch addresses that may be
71 * unmapped).
72 *
73 * Note: "in0" and "in1" are preserved for debugging purposes.
74 */
75 .section .kprobes.text,"ax"
76GLOBAL_ENTRY(clflush_cache_range)
77
78 .prologue
79 alloc r2=ar.pfs,2,0,0,0
80 movl r3=ia64_cache_stride_shift
81 mov r21=1
82 add r22=in1,in0
83 ;;
84 ld8 r20=[r3] // r20: stride shift
85 sub r22=r22,r0,1 // last byte address
86 ;;
87 shr.u r23=in0,r20 // start / (stride size)
88 shr.u r22=r22,r20 // (last byte address) / (stride size)
89 shl r21=r21,r20 // r21: stride size of the i-cache(s)
90 ;;
91 sub r8=r22,r23 // number of strides - 1
92 shl r24=r23,r20 // r24: addresses for "fc" =
93 // "start" rounded down to stride
94 // boundary
95 .save ar.lc,r3
96 mov r3=ar.lc // save ar.lc
97 ;;
98
99 .body
100 mov ar.lc=r8
101 ;;
102 /*
103 * 32 byte aligned loop, even number of (actually 2) bundles
104 */
105.Loop_fc:
106 fc r24 // issuable on M0 only
107 add r24=r21,r24 // we flush "stride size" bytes per iteration
108 nop.i 0
109 br.cloop.sptk.few .Loop_fc
110 ;;
111 sync.i
112 ;;
113 srlz.i
114 ;;
115 mov ar.lc=r3 // restore ar.lc
116 br.ret.sptk.many rp
117END(clflush_cache_range)
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c
index 8caf42471f0d..bd9818a36b47 100644
--- a/arch/ia64/mm/tlb.c
+++ b/arch/ia64/mm/tlb.c
@@ -362,9 +362,13 @@ ia64_tlb_init (void)
362 per_cpu(ia64_tr_num, cpu) = 362 per_cpu(ia64_tr_num, cpu) =
363 vm_info_1.pal_vm_info_1_s.max_dtr_entry+1; 363 vm_info_1.pal_vm_info_1_s.max_dtr_entry+1;
364 if (per_cpu(ia64_tr_num, cpu) > IA64_TR_ALLOC_MAX) { 364 if (per_cpu(ia64_tr_num, cpu) > IA64_TR_ALLOC_MAX) {
365 static int justonce = 1;
365 per_cpu(ia64_tr_num, cpu) = IA64_TR_ALLOC_MAX; 366 per_cpu(ia64_tr_num, cpu) = IA64_TR_ALLOC_MAX;
366 printk(KERN_DEBUG "TR register number exceeds IA64_TR_ALLOC_MAX!" 367 if (justonce) {
367 "IA64_TR_ALLOC_MAX should be extended\n"); 368 justonce = 0;
369 printk(KERN_DEBUG "TR register number exceeds "
370 "IA64_TR_ALLOC_MAX!\n");
371 }
368 } 372 }
369} 373}
370 374
diff --git a/arch/ia64/oprofile/init.c b/arch/ia64/oprofile/init.c
index 125a602a660d..31b545c35460 100644
--- a/arch/ia64/oprofile/init.c
+++ b/arch/ia64/oprofile/init.c
@@ -12,11 +12,11 @@
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/errno.h> 13#include <linux/errno.h>
14 14
15extern int perfmon_init(struct oprofile_operations * ops); 15extern int perfmon_init(struct oprofile_operations *ops);
16extern void perfmon_exit(void); 16extern void perfmon_exit(void);
17extern void ia64_backtrace(struct pt_regs * const regs, unsigned int depth); 17extern void ia64_backtrace(struct pt_regs * const regs, unsigned int depth);
18 18
19int __init oprofile_arch_init(struct oprofile_operations * ops) 19int __init oprofile_arch_init(struct oprofile_operations *ops)
20{ 20{
21 int ret = -ENODEV; 21 int ret = -ENODEV;
22 22
diff --git a/arch/ia64/oprofile/perfmon.c b/arch/ia64/oprofile/perfmon.c
index bc41dd32fec6..192d3e8e1f65 100644
--- a/arch/ia64/oprofile/perfmon.c
+++ b/arch/ia64/oprofile/perfmon.c
@@ -56,7 +56,7 @@ static pfm_buffer_fmt_t oprofile_fmt = {
56}; 56};
57 57
58 58
59static char * get_cpu_type(void) 59static char *get_cpu_type(void)
60{ 60{
61 __u8 family = local_cpu_data->family; 61 __u8 family = local_cpu_data->family;
62 62
@@ -75,7 +75,7 @@ static char * get_cpu_type(void)
75 75
76static int using_perfmon; 76static int using_perfmon;
77 77
78int perfmon_init(struct oprofile_operations * ops) 78int perfmon_init(struct oprofile_operations *ops)
79{ 79{
80 int ret = pfm_register_buffer_fmt(&oprofile_fmt); 80 int ret = pfm_register_buffer_fmt(&oprofile_fmt);
81 if (ret) 81 if (ret)
diff --git a/arch/ia64/scripts/pvcheck.sed b/arch/ia64/scripts/pvcheck.sed
new file mode 100644
index 000000000000..ba66ac2e4c60
--- /dev/null
+++ b/arch/ia64/scripts/pvcheck.sed
@@ -0,0 +1,32 @@
1#
2# Checker for paravirtualizations of privileged operations.
3#
4s/ssm.*psr\.ic.*/.warning \"ssm psr.ic should not be used directly\"/g
5s/rsm.*psr\.ic.*/.warning \"rsm psr.ic should not be used directly\"/g
6s/ssm.*psr\.i.*/.warning \"ssm psr.i should not be used directly\"/g
7s/rsm.*psr\.i.*/.warning \"rsm psr.i should not be used directly\"/g
8s/ssm.*psr\.dt.*/.warning \"ssm psr.dt should not be used directly\"/g
9s/rsm.*psr\.dt.*/.warning \"rsm psr.dt should not be used directly\"/g
10s/mov.*=.*cr\.ifa/.warning \"cr.ifa should not used directly\"/g
11s/mov.*=.*cr\.itir/.warning \"cr.itir should not used directly\"/g
12s/mov.*=.*cr\.isr/.warning \"cr.isr should not used directly\"/g
13s/mov.*=.*cr\.iha/.warning \"cr.iha should not used directly\"/g
14s/mov.*=.*cr\.ipsr/.warning \"cr.ipsr should not used directly\"/g
15s/mov.*=.*cr\.iim/.warning \"cr.iim should not used directly\"/g
16s/mov.*=.*cr\.iip/.warning \"cr.iip should not used directly\"/g
17s/mov.*=.*cr\.ivr/.warning \"cr.ivr should not used directly\"/g
18s/mov.*=[^\.]*psr/.warning \"psr should not used directly\"/g # avoid ar.fpsr
19s/mov.*=.*ar\.eflags/.warning \"ar.eflags should not used directly\"/g
20s/mov.*cr\.ifa.*=.*/.warning \"cr.ifa should not used directly\"/g
21s/mov.*cr\.itir.*=.*/.warning \"cr.itir should not used directly\"/g
22s/mov.*cr\.iha.*=.*/.warning \"cr.iha should not used directly\"/g
23s/mov.*cr\.ipsr.*=.*/.warning \"cr.ipsr should not used directly\"/g
24s/mov.*cr\.ifs.*=.*/.warning \"cr.ifs should not used directly\"/g
25s/mov.*cr\.iip.*=.*/.warning \"cr.iip should not used directly\"/g
26s/mov.*cr\.kr.*=.*/.warning \"cr.kr should not used directly\"/g
27s/mov.*ar\.eflags.*=.*/.warning \"ar.eflags should not used directly\"/g
28s/itc\.i.*/.warning \"itc.i should not be used directly.\"/g
29s/itc\.d.*/.warning \"itc.d should not be used directly.\"/g
30s/bsw\.0/.warning \"bsw.0 should not be used directly.\"/g
31s/bsw\.1/.warning \"bsw.1 should not be used directly.\"/g
32s/ptc\.ga.*/.warning \"ptc.ga should not be used directly.\"/g
diff --git a/arch/ia64/xen/Kconfig b/arch/ia64/xen/Kconfig
new file mode 100644
index 000000000000..f1683a20275b
--- /dev/null
+++ b/arch/ia64/xen/Kconfig
@@ -0,0 +1,26 @@
1#
2# This Kconfig describes xen/ia64 options
3#
4
5config XEN
6 bool "Xen hypervisor support"
7 default y
8 depends on PARAVIRT && MCKINLEY && IA64_PAGE_SIZE_16KB && EXPERIMENTAL
9 select XEN_XENCOMM
10 select NO_IDLE_HZ
11
12 # those are required to save/restore.
13 select ARCH_SUSPEND_POSSIBLE
14 select SUSPEND
15 select PM_SLEEP
16 help
17 Enable Xen hypervisor support. Resulting kernel runs
18 both as a guest OS on Xen and natively on hardware.
19
20config XEN_XENCOMM
21 depends on XEN
22 bool
23
24config NO_IDLE_HZ
25 depends on XEN
26 bool
diff --git a/arch/ia64/xen/Makefile b/arch/ia64/xen/Makefile
new file mode 100644
index 000000000000..0ad0224693d9
--- /dev/null
+++ b/arch/ia64/xen/Makefile
@@ -0,0 +1,22 @@
1#
2# Makefile for Xen components
3#
4
5obj-y := hypercall.o xenivt.o xensetup.o xen_pv_ops.o irq_xen.o \
6 hypervisor.o xencomm.o xcom_hcall.o grant-table.o time.o suspend.o
7
8obj-$(CONFIG_IA64_GENERIC) += machvec.o
9
10AFLAGS_xenivt.o += -D__IA64_ASM_PARAVIRTUALIZED_XEN
11
12# xen multi compile
13ASM_PARAVIRT_MULTI_COMPILE_SRCS = ivt.S entry.S
14ASM_PARAVIRT_OBJS = $(addprefix xen-,$(ASM_PARAVIRT_MULTI_COMPILE_SRCS:.S=.o))
15obj-y += $(ASM_PARAVIRT_OBJS)
16define paravirtualized_xen
17AFLAGS_$(1) += -D__IA64_ASM_PARAVIRTUALIZED_XEN
18endef
19$(foreach o,$(ASM_PARAVIRT_OBJS),$(eval $(call paravirtualized_xen,$(o))))
20
21$(obj)/xen-%.o: $(src)/../kernel/%.S FORCE
22 $(call if_changed_dep,as_o_S)
diff --git a/arch/ia64/xen/grant-table.c b/arch/ia64/xen/grant-table.c
new file mode 100644
index 000000000000..777dd9a9108b
--- /dev/null
+++ b/arch/ia64/xen/grant-table.c
@@ -0,0 +1,155 @@
1/******************************************************************************
2 * arch/ia64/xen/grant-table.c
3 *
4 * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#include <linux/module.h>
24#include <linux/vmalloc.h>
25#include <linux/mm.h>
26
27#include <xen/interface/xen.h>
28#include <xen/interface/memory.h>
29#include <xen/grant_table.h>
30
31#include <asm/xen/hypervisor.h>
32
33struct vm_struct *xen_alloc_vm_area(unsigned long size)
34{
35 int order;
36 unsigned long virt;
37 unsigned long nr_pages;
38 struct vm_struct *area;
39
40 order = get_order(size);
41 virt = __get_free_pages(GFP_KERNEL, order);
42 if (virt == 0)
43 goto err0;
44 nr_pages = 1 << order;
45 scrub_pages(virt, nr_pages);
46
47 area = kmalloc(sizeof(*area), GFP_KERNEL);
48 if (area == NULL)
49 goto err1;
50
51 area->flags = VM_IOREMAP;
52 area->addr = (void *)virt;
53 area->size = size;
54 area->pages = NULL;
55 area->nr_pages = nr_pages;
56 area->phys_addr = 0; /* xenbus_map_ring_valloc uses this field! */
57
58 return area;
59
60err1:
61 free_pages(virt, order);
62err0:
63 return NULL;
64}
65EXPORT_SYMBOL_GPL(xen_alloc_vm_area);
66
67void xen_free_vm_area(struct vm_struct *area)
68{
69 unsigned int order = get_order(area->size);
70 unsigned long i;
71 unsigned long phys_addr = __pa(area->addr);
72
73 /* This area is used for foreign page mappping.
74 * So underlying machine page may not be assigned. */
75 for (i = 0; i < (1 << order); i++) {
76 unsigned long ret;
77 unsigned long gpfn = (phys_addr >> PAGE_SHIFT) + i;
78 struct xen_memory_reservation reservation = {
79 .nr_extents = 1,
80 .address_bits = 0,
81 .extent_order = 0,
82 .domid = DOMID_SELF
83 };
84 set_xen_guest_handle(reservation.extent_start, &gpfn);
85 ret = HYPERVISOR_memory_op(XENMEM_populate_physmap,
86 &reservation);
87 BUG_ON(ret != 1);
88 }
89 free_pages((unsigned long)area->addr, order);
90 kfree(area);
91}
92EXPORT_SYMBOL_GPL(xen_free_vm_area);
93
94
95/****************************************************************************
96 * grant table hack
97 * cmd: GNTTABOP_xxx
98 */
99
100int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes,
101 unsigned long max_nr_gframes,
102 struct grant_entry **__shared)
103{
104 *__shared = __va(frames[0] << PAGE_SHIFT);
105 return 0;
106}
107
108void arch_gnttab_unmap_shared(struct grant_entry *shared,
109 unsigned long nr_gframes)
110{
111 /* nothing */
112}
113
114static void
115gnttab_map_grant_ref_pre(struct gnttab_map_grant_ref *uop)
116{
117 uint32_t flags;
118
119 flags = uop->flags;
120
121 if (flags & GNTMAP_host_map) {
122 if (flags & GNTMAP_application_map) {
123 printk(KERN_DEBUG
124 "GNTMAP_application_map is not supported yet: "
125 "flags 0x%x\n", flags);
126 BUG();
127 }
128 if (flags & GNTMAP_contains_pte) {
129 printk(KERN_DEBUG
130 "GNTMAP_contains_pte is not supported yet: "
131 "flags 0x%x\n", flags);
132 BUG();
133 }
134 } else if (flags & GNTMAP_device_map) {
135 printk("GNTMAP_device_map is not supported yet 0x%x\n", flags);
136 BUG(); /* not yet. actually this flag is not used. */
137 } else {
138 BUG();
139 }
140}
141
142int
143HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count)
144{
145 if (cmd == GNTTABOP_map_grant_ref) {
146 unsigned int i;
147 for (i = 0; i < count; i++) {
148 gnttab_map_grant_ref_pre(
149 (struct gnttab_map_grant_ref *)uop + i);
150 }
151 }
152 return xencomm_hypercall_grant_table_op(cmd, uop, count);
153}
154
155EXPORT_SYMBOL(HYPERVISOR_grant_table_op);
diff --git a/arch/ia64/xen/hypercall.S b/arch/ia64/xen/hypercall.S
new file mode 100644
index 000000000000..d4ff0b9e79f1
--- /dev/null
+++ b/arch/ia64/xen/hypercall.S
@@ -0,0 +1,91 @@
1/*
2 * Support routines for Xen hypercalls
3 *
4 * Copyright (C) 2005 Dan Magenheimer <dan.magenheimer@hp.com>
5 * Copyright (C) 2008 Yaozu (Eddie) Dong <eddie.dong@intel.com>
6 */
7
8#include <asm/asmmacro.h>
9#include <asm/intrinsics.h>
10#include <asm/xen/privop.h>
11
12/*
13 * Hypercalls without parameter.
14 */
15#define __HCALL0(name,hcall) \
16 GLOBAL_ENTRY(name); \
17 break hcall; \
18 br.ret.sptk.many rp; \
19 END(name)
20
21/*
22 * Hypercalls with 1 parameter.
23 */
24#define __HCALL1(name,hcall) \
25 GLOBAL_ENTRY(name); \
26 mov r8=r32; \
27 break hcall; \
28 br.ret.sptk.many rp; \
29 END(name)
30
31/*
32 * Hypercalls with 2 parameters.
33 */
34#define __HCALL2(name,hcall) \
35 GLOBAL_ENTRY(name); \
36 mov r8=r32; \
37 mov r9=r33; \
38 break hcall; \
39 br.ret.sptk.many rp; \
40 END(name)
41
42__HCALL0(xen_get_psr, HYPERPRIVOP_GET_PSR)
43__HCALL0(xen_get_ivr, HYPERPRIVOP_GET_IVR)
44__HCALL0(xen_get_tpr, HYPERPRIVOP_GET_TPR)
45__HCALL0(xen_hyper_ssm_i, HYPERPRIVOP_SSM_I)
46
47__HCALL1(xen_set_tpr, HYPERPRIVOP_SET_TPR)
48__HCALL1(xen_eoi, HYPERPRIVOP_EOI)
49__HCALL1(xen_thash, HYPERPRIVOP_THASH)
50__HCALL1(xen_set_itm, HYPERPRIVOP_SET_ITM)
51__HCALL1(xen_get_rr, HYPERPRIVOP_GET_RR)
52__HCALL1(xen_fc, HYPERPRIVOP_FC)
53__HCALL1(xen_get_cpuid, HYPERPRIVOP_GET_CPUID)
54__HCALL1(xen_get_pmd, HYPERPRIVOP_GET_PMD)
55
56__HCALL2(xen_ptcga, HYPERPRIVOP_PTC_GA)
57__HCALL2(xen_set_rr, HYPERPRIVOP_SET_RR)
58__HCALL2(xen_set_kr, HYPERPRIVOP_SET_KR)
59
60#ifdef CONFIG_IA32_SUPPORT
61__HCALL1(xen_get_eflag, HYPERPRIVOP_GET_EFLAG)
62__HCALL1(xen_set_eflag, HYPERPRIVOP_SET_EFLAG) // refer SDM vol1 3.1.8
63#endif /* CONFIG_IA32_SUPPORT */
64
65GLOBAL_ENTRY(xen_set_rr0_to_rr4)
66 mov r8=r32
67 mov r9=r33
68 mov r10=r34
69 mov r11=r35
70 mov r14=r36
71 XEN_HYPER_SET_RR0_TO_RR4
72 br.ret.sptk.many rp
73 ;;
74END(xen_set_rr0_to_rr4)
75
76GLOBAL_ENTRY(xen_send_ipi)
77 mov r14=r32
78 mov r15=r33
79 mov r2=0x400
80 break 0x1000
81 ;;
82 br.ret.sptk.many rp
83 ;;
84END(xen_send_ipi)
85
86GLOBAL_ENTRY(__hypercall)
87 mov r2=r37
88 break 0x1000
89 br.ret.sptk.many b0
90 ;;
91END(__hypercall)
diff --git a/arch/ia64/xen/hypervisor.c b/arch/ia64/xen/hypervisor.c
new file mode 100644
index 000000000000..cac4d97c0b5a
--- /dev/null
+++ b/arch/ia64/xen/hypervisor.c
@@ -0,0 +1,96 @@
1/******************************************************************************
2 * arch/ia64/xen/hypervisor.c
3 *
4 * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#include <linux/efi.h>
24#include <asm/xen/hypervisor.h>
25#include <asm/xen/privop.h>
26
27#include "irq_xen.h"
28
29struct shared_info *HYPERVISOR_shared_info __read_mostly =
30 (struct shared_info *)XSI_BASE;
31EXPORT_SYMBOL(HYPERVISOR_shared_info);
32
33DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
34
35struct start_info *xen_start_info;
36EXPORT_SYMBOL(xen_start_info);
37
38EXPORT_SYMBOL(xen_domain_type);
39
40EXPORT_SYMBOL(__hypercall);
41
42/* Stolen from arch/x86/xen/enlighten.c */
43/*
44 * Flag to determine whether vcpu info placement is available on all
45 * VCPUs. We assume it is to start with, and then set it to zero on
46 * the first failure. This is because it can succeed on some VCPUs
47 * and not others, since it can involve hypervisor memory allocation,
48 * or because the guest failed to guarantee all the appropriate
49 * constraints on all VCPUs (ie buffer can't cross a page boundary).
50 *
51 * Note that any particular CPU may be using a placed vcpu structure,
52 * but we can only optimise if the all are.
53 *
54 * 0: not available, 1: available
55 */
56
57static void __init xen_vcpu_setup(int cpu)
58{
59 /*
60 * WARNING:
61 * before changing MAX_VIRT_CPUS,
62 * check that shared_info fits on a page
63 */
64 BUILD_BUG_ON(sizeof(struct shared_info) > PAGE_SIZE);
65 per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
66}
67
68void __init xen_setup_vcpu_info_placement(void)
69{
70 int cpu;
71
72 for_each_possible_cpu(cpu)
73 xen_vcpu_setup(cpu);
74}
75
76void __cpuinit
77xen_cpu_init(void)
78{
79 xen_smp_intr_init();
80}
81
82/**************************************************************************
83 * opt feature
84 */
85void
86xen_ia64_enable_opt_feature(void)
87{
88 /* Enable region 7 identity map optimizations in Xen */
89 struct xen_ia64_opt_feature optf;
90
91 optf.cmd = XEN_IA64_OPTF_IDENT_MAP_REG7;
92 optf.on = XEN_IA64_OPTF_ON;
93 optf.pgprot = pgprot_val(PAGE_KERNEL);
94 optf.key = 0; /* No key on linux. */
95 HYPERVISOR_opt_feature(&optf);
96}
diff --git a/arch/ia64/xen/irq_xen.c b/arch/ia64/xen/irq_xen.c
new file mode 100644
index 000000000000..af93aadb68bb
--- /dev/null
+++ b/arch/ia64/xen/irq_xen.c
@@ -0,0 +1,435 @@
1/******************************************************************************
2 * arch/ia64/xen/irq_xen.c
3 *
4 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#include <linux/cpu.h>
24
25#include <xen/interface/xen.h>
26#include <xen/interface/callback.h>
27#include <xen/events.h>
28
29#include <asm/xen/privop.h>
30
31#include "irq_xen.h"
32
33/***************************************************************************
34 * pv_irq_ops
35 * irq operations
36 */
37
38static int
39xen_assign_irq_vector(int irq)
40{
41 struct physdev_irq irq_op;
42
43 irq_op.irq = irq;
44 if (HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op))
45 return -ENOSPC;
46
47 return irq_op.vector;
48}
49
50static void
51xen_free_irq_vector(int vector)
52{
53 struct physdev_irq irq_op;
54
55 if (vector < IA64_FIRST_DEVICE_VECTOR ||
56 vector > IA64_LAST_DEVICE_VECTOR)
57 return;
58
59 irq_op.vector = vector;
60 if (HYPERVISOR_physdev_op(PHYSDEVOP_free_irq_vector, &irq_op))
61 printk(KERN_WARNING "%s: xen_free_irq_vecotr fail vector=%d\n",
62 __func__, vector);
63}
64
65
66static DEFINE_PER_CPU(int, timer_irq) = -1;
67static DEFINE_PER_CPU(int, ipi_irq) = -1;
68static DEFINE_PER_CPU(int, resched_irq) = -1;
69static DEFINE_PER_CPU(int, cmc_irq) = -1;
70static DEFINE_PER_CPU(int, cmcp_irq) = -1;
71static DEFINE_PER_CPU(int, cpep_irq) = -1;
72#define NAME_SIZE 15
73static DEFINE_PER_CPU(char[NAME_SIZE], timer_name);
74static DEFINE_PER_CPU(char[NAME_SIZE], ipi_name);
75static DEFINE_PER_CPU(char[NAME_SIZE], resched_name);
76static DEFINE_PER_CPU(char[NAME_SIZE], cmc_name);
77static DEFINE_PER_CPU(char[NAME_SIZE], cmcp_name);
78static DEFINE_PER_CPU(char[NAME_SIZE], cpep_name);
79#undef NAME_SIZE
80
81struct saved_irq {
82 unsigned int irq;
83 struct irqaction *action;
84};
85/* 16 should be far optimistic value, since only several percpu irqs
86 * are registered early.
87 */
88#define MAX_LATE_IRQ 16
89static struct saved_irq saved_percpu_irqs[MAX_LATE_IRQ];
90static unsigned short late_irq_cnt;
91static unsigned short saved_irq_cnt;
92static int xen_slab_ready;
93
94#ifdef CONFIG_SMP
95/* Dummy stub. Though we may check XEN_RESCHEDULE_VECTOR before __do_IRQ,
96 * it ends up to issue several memory accesses upon percpu data and
97 * thus adds unnecessary traffic to other paths.
98 */
99static irqreturn_t
100xen_dummy_handler(int irq, void *dev_id)
101{
102
103 return IRQ_HANDLED;
104}
105
106static struct irqaction xen_ipi_irqaction = {
107 .handler = handle_IPI,
108 .flags = IRQF_DISABLED,
109 .name = "IPI"
110};
111
112static struct irqaction xen_resched_irqaction = {
113 .handler = xen_dummy_handler,
114 .flags = IRQF_DISABLED,
115 .name = "resched"
116};
117
118static struct irqaction xen_tlb_irqaction = {
119 .handler = xen_dummy_handler,
120 .flags = IRQF_DISABLED,
121 .name = "tlb_flush"
122};
123#endif
124
125/*
126 * This is xen version percpu irq registration, which needs bind
127 * to xen specific evtchn sub-system. One trick here is that xen
128 * evtchn binding interface depends on kmalloc because related
129 * port needs to be freed at device/cpu down. So we cache the
130 * registration on BSP before slab is ready and then deal them
131 * at later point. For rest instances happening after slab ready,
132 * we hook them to xen evtchn immediately.
133 *
134 * FIXME: MCA is not supported by far, and thus "nomca" boot param is
135 * required.
136 */
137static void
138__xen_register_percpu_irq(unsigned int cpu, unsigned int vec,
139 struct irqaction *action, int save)
140{
141 irq_desc_t *desc;
142 int irq = 0;
143
144 if (xen_slab_ready) {
145 switch (vec) {
146 case IA64_TIMER_VECTOR:
147 snprintf(per_cpu(timer_name, cpu),
148 sizeof(per_cpu(timer_name, cpu)),
149 "%s%d", action->name, cpu);
150 irq = bind_virq_to_irqhandler(VIRQ_ITC, cpu,
151 action->handler, action->flags,
152 per_cpu(timer_name, cpu), action->dev_id);
153 per_cpu(timer_irq, cpu) = irq;
154 break;
155 case IA64_IPI_RESCHEDULE:
156 snprintf(per_cpu(resched_name, cpu),
157 sizeof(per_cpu(resched_name, cpu)),
158 "%s%d", action->name, cpu);
159 irq = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR, cpu,
160 action->handler, action->flags,
161 per_cpu(resched_name, cpu), action->dev_id);
162 per_cpu(resched_irq, cpu) = irq;
163 break;
164 case IA64_IPI_VECTOR:
165 snprintf(per_cpu(ipi_name, cpu),
166 sizeof(per_cpu(ipi_name, cpu)),
167 "%s%d", action->name, cpu);
168 irq = bind_ipi_to_irqhandler(XEN_IPI_VECTOR, cpu,
169 action->handler, action->flags,
170 per_cpu(ipi_name, cpu), action->dev_id);
171 per_cpu(ipi_irq, cpu) = irq;
172 break;
173 case IA64_CMC_VECTOR:
174 snprintf(per_cpu(cmc_name, cpu),
175 sizeof(per_cpu(cmc_name, cpu)),
176 "%s%d", action->name, cpu);
177 irq = bind_virq_to_irqhandler(VIRQ_MCA_CMC, cpu,
178 action->handler,
179 action->flags,
180 per_cpu(cmc_name, cpu),
181 action->dev_id);
182 per_cpu(cmc_irq, cpu) = irq;
183 break;
184 case IA64_CMCP_VECTOR:
185 snprintf(per_cpu(cmcp_name, cpu),
186 sizeof(per_cpu(cmcp_name, cpu)),
187 "%s%d", action->name, cpu);
188 irq = bind_ipi_to_irqhandler(XEN_CMCP_VECTOR, cpu,
189 action->handler,
190 action->flags,
191 per_cpu(cmcp_name, cpu),
192 action->dev_id);
193 per_cpu(cmcp_irq, cpu) = irq;
194 break;
195 case IA64_CPEP_VECTOR:
196 snprintf(per_cpu(cpep_name, cpu),
197 sizeof(per_cpu(cpep_name, cpu)),
198 "%s%d", action->name, cpu);
199 irq = bind_ipi_to_irqhandler(XEN_CPEP_VECTOR, cpu,
200 action->handler,
201 action->flags,
202 per_cpu(cpep_name, cpu),
203 action->dev_id);
204 per_cpu(cpep_irq, cpu) = irq;
205 break;
206 case IA64_CPE_VECTOR:
207 case IA64_MCA_RENDEZ_VECTOR:
208 case IA64_PERFMON_VECTOR:
209 case IA64_MCA_WAKEUP_VECTOR:
210 case IA64_SPURIOUS_INT_VECTOR:
211 /* No need to complain, these aren't supported. */
212 break;
213 default:
214 printk(KERN_WARNING "Percpu irq %d is unsupported "
215 "by xen!\n", vec);
216 break;
217 }
218 BUG_ON(irq < 0);
219
220 if (irq > 0) {
221 /*
222 * Mark percpu. Without this, migrate_irqs() will
223 * mark the interrupt for migrations and trigger it
224 * on cpu hotplug.
225 */
226 desc = irq_desc + irq;
227 desc->status |= IRQ_PER_CPU;
228 }
229 }
230
231 /* For BSP, we cache registered percpu irqs, and then re-walk
232 * them when initializing APs
233 */
234 if (!cpu && save) {
235 BUG_ON(saved_irq_cnt == MAX_LATE_IRQ);
236 saved_percpu_irqs[saved_irq_cnt].irq = vec;
237 saved_percpu_irqs[saved_irq_cnt].action = action;
238 saved_irq_cnt++;
239 if (!xen_slab_ready)
240 late_irq_cnt++;
241 }
242}
243
244static void
245xen_register_percpu_irq(ia64_vector vec, struct irqaction *action)
246{
247 __xen_register_percpu_irq(smp_processor_id(), vec, action, 1);
248}
249
250static void
251xen_bind_early_percpu_irq(void)
252{
253 int i;
254
255 xen_slab_ready = 1;
256 /* There's no race when accessing this cached array, since only
257 * BSP will face with such step shortly
258 */
259 for (i = 0; i < late_irq_cnt; i++)
260 __xen_register_percpu_irq(smp_processor_id(),
261 saved_percpu_irqs[i].irq,
262 saved_percpu_irqs[i].action, 0);
263}
264
265/* FIXME: There's no obvious point to check whether slab is ready. So
266 * a hack is used here by utilizing a late time hook.
267 */
268
269#ifdef CONFIG_HOTPLUG_CPU
270static int __devinit
271unbind_evtchn_callback(struct notifier_block *nfb,
272 unsigned long action, void *hcpu)
273{
274 unsigned int cpu = (unsigned long)hcpu;
275
276 if (action == CPU_DEAD) {
277 /* Unregister evtchn. */
278 if (per_cpu(cpep_irq, cpu) >= 0) {
279 unbind_from_irqhandler(per_cpu(cpep_irq, cpu), NULL);
280 per_cpu(cpep_irq, cpu) = -1;
281 }
282 if (per_cpu(cmcp_irq, cpu) >= 0) {
283 unbind_from_irqhandler(per_cpu(cmcp_irq, cpu), NULL);
284 per_cpu(cmcp_irq, cpu) = -1;
285 }
286 if (per_cpu(cmc_irq, cpu) >= 0) {
287 unbind_from_irqhandler(per_cpu(cmc_irq, cpu), NULL);
288 per_cpu(cmc_irq, cpu) = -1;
289 }
290 if (per_cpu(ipi_irq, cpu) >= 0) {
291 unbind_from_irqhandler(per_cpu(ipi_irq, cpu), NULL);
292 per_cpu(ipi_irq, cpu) = -1;
293 }
294 if (per_cpu(resched_irq, cpu) >= 0) {
295 unbind_from_irqhandler(per_cpu(resched_irq, cpu),
296 NULL);
297 per_cpu(resched_irq, cpu) = -1;
298 }
299 if (per_cpu(timer_irq, cpu) >= 0) {
300 unbind_from_irqhandler(per_cpu(timer_irq, cpu), NULL);
301 per_cpu(timer_irq, cpu) = -1;
302 }
303 }
304 return NOTIFY_OK;
305}
306
307static struct notifier_block unbind_evtchn_notifier = {
308 .notifier_call = unbind_evtchn_callback,
309 .priority = 0
310};
311#endif
312
313void xen_smp_intr_init_early(unsigned int cpu)
314{
315#ifdef CONFIG_SMP
316 unsigned int i;
317
318 for (i = 0; i < saved_irq_cnt; i++)
319 __xen_register_percpu_irq(cpu, saved_percpu_irqs[i].irq,
320 saved_percpu_irqs[i].action, 0);
321#endif
322}
323
324void xen_smp_intr_init(void)
325{
326#ifdef CONFIG_SMP
327 unsigned int cpu = smp_processor_id();
328 struct callback_register event = {
329 .type = CALLBACKTYPE_event,
330 .address = { .ip = (unsigned long)&xen_event_callback },
331 };
332
333 if (cpu == 0) {
334 /* Initialization was already done for boot cpu. */
335#ifdef CONFIG_HOTPLUG_CPU
336 /* Register the notifier only once. */
337 register_cpu_notifier(&unbind_evtchn_notifier);
338#endif
339 return;
340 }
341
342 /* This should be piggyback when setup vcpu guest context */
343 BUG_ON(HYPERVISOR_callback_op(CALLBACKOP_register, &event));
344#endif /* CONFIG_SMP */
345}
346
347void __init
348xen_irq_init(void)
349{
350 struct callback_register event = {
351 .type = CALLBACKTYPE_event,
352 .address = { .ip = (unsigned long)&xen_event_callback },
353 };
354
355 xen_init_IRQ();
356 BUG_ON(HYPERVISOR_callback_op(CALLBACKOP_register, &event));
357 late_time_init = xen_bind_early_percpu_irq;
358}
359
360void
361xen_platform_send_ipi(int cpu, int vector, int delivery_mode, int redirect)
362{
363#ifdef CONFIG_SMP
364 /* TODO: we need to call vcpu_up here */
365 if (unlikely(vector == ap_wakeup_vector)) {
366 /* XXX
367 * This should be in __cpu_up(cpu) in ia64 smpboot.c
368 * like x86. But don't want to modify it,
369 * keep it untouched.
370 */
371 xen_smp_intr_init_early(cpu);
372
373 xen_send_ipi(cpu, vector);
374 /* vcpu_prepare_and_up(cpu); */
375 return;
376 }
377#endif
378
379 switch (vector) {
380 case IA64_IPI_VECTOR:
381 xen_send_IPI_one(cpu, XEN_IPI_VECTOR);
382 break;
383 case IA64_IPI_RESCHEDULE:
384 xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
385 break;
386 case IA64_CMCP_VECTOR:
387 xen_send_IPI_one(cpu, XEN_CMCP_VECTOR);
388 break;
389 case IA64_CPEP_VECTOR:
390 xen_send_IPI_one(cpu, XEN_CPEP_VECTOR);
391 break;
392 case IA64_TIMER_VECTOR: {
393 /* this is used only once by check_sal_cache_flush()
394 at boot time */
395 static int used = 0;
396 if (!used) {
397 xen_send_ipi(cpu, IA64_TIMER_VECTOR);
398 used = 1;
399 break;
400 }
401 /* fallthrough */
402 }
403 default:
404 printk(KERN_WARNING "Unsupported IPI type 0x%x\n",
405 vector);
406 notify_remote_via_irq(0); /* defaults to 0 irq */
407 break;
408 }
409}
410
411static void __init
412xen_register_ipi(void)
413{
414#ifdef CONFIG_SMP
415 register_percpu_irq(IA64_IPI_VECTOR, &xen_ipi_irqaction);
416 register_percpu_irq(IA64_IPI_RESCHEDULE, &xen_resched_irqaction);
417 register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, &xen_tlb_irqaction);
418#endif
419}
420
421static void
422xen_resend_irq(unsigned int vector)
423{
424 (void)resend_irq_on_evtchn(vector);
425}
426
427const struct pv_irq_ops xen_irq_ops __initdata = {
428 .register_ipi = xen_register_ipi,
429
430 .assign_irq_vector = xen_assign_irq_vector,
431 .free_irq_vector = xen_free_irq_vector,
432 .register_percpu_irq = xen_register_percpu_irq,
433
434 .resend_irq = xen_resend_irq,
435};
diff --git a/arch/ia64/xen/irq_xen.h b/arch/ia64/xen/irq_xen.h
new file mode 100644
index 000000000000..26110f330c87
--- /dev/null
+++ b/arch/ia64/xen/irq_xen.h
@@ -0,0 +1,34 @@
1/******************************************************************************
2 * arch/ia64/xen/irq_xen.h
3 *
4 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
#ifndef IRQ_XEN_H
#define IRQ_XEN_H

/* late_time_init hook; NOTE(review): presumably pointed at a Xen
 * time-init routine by setup code — confirm against callers. */
extern void (*late_time_init)(void);
/* Assembly entry point for the Xen event-channel upcall. */
extern char xen_event_callback;
void __init xen_init_IRQ(void);

/* pv_irq_ops instance installed by xen_setup_pv_ops(). */
extern const struct pv_irq_ops xen_irq_ops __initdata;
extern void xen_smp_intr_init(void);
/* Deliver IPI vector 'vec' to 'cpu' through the hypervisor. */
extern void xen_send_ipi(int cpu, int vec);

#endif /* IRQ_XEN_H */
diff --git a/arch/ia64/xen/machvec.c b/arch/ia64/xen/machvec.c
new file mode 100644
index 000000000000..4ad588a7c279
--- /dev/null
+++ b/arch/ia64/xen/machvec.c
@@ -0,0 +1,4 @@
/* Instantiate the ia64 machine vector for the "xen" platform. */
#define MACHVEC_PLATFORM_NAME xen
#define MACHVEC_PLATFORM_HEADER <asm/machvec_xen.h>
#include <asm/machvec_init.h>

diff --git a/arch/ia64/xen/suspend.c b/arch/ia64/xen/suspend.c
new file mode 100644
index 000000000000..fd66b048c6fa
--- /dev/null
+++ b/arch/ia64/xen/suspend.c
@@ -0,0 +1,64 @@
1/******************************************************************************
2 * arch/ia64/xen/suspend.c
3 *
4 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * suspend/resume
22 */
23
24#include <xen/xen-ops.h>
25#include <asm/xen/hypervisor.h>
26#include "time.h"
27
/* Suspend hook: no page-table pinning to do on ia64. */
void
xen_mm_pin_all(void)
{
	/* nothing */
}
33
/* Resume hook: no page-table unpinning to do on ia64. */
void
xen_mm_unpin_all(void)
{
	/* nothing */
}
39
/* Called before devices are suspended; nothing to do on ia64. */
void xen_pre_device_suspend(void)
{
	/* nothing */
}
44
/*
 * Called just before the domain suspends; nothing to do on ia64.
 *
 * Fix: spell the empty parameter list as (void).  A bare () in a
 * definition is an old-style non-prototype declaration and is
 * inconsistent with the other stubs in this file.
 */
void
xen_pre_suspend(void)
{
	/* nothing */
}
50
/* Called after resume; @suspend_cancelled is nonzero if the suspend aborted. */
void
xen_post_suspend(int suspend_cancelled)
{
	/* a cancelled suspend left all state intact — nothing to redo */
	if (suspend_cancelled)
		return;

	/* re-negotiate optional hypervisor features after the restore */
	xen_ia64_enable_opt_feature();
	/* add more if necessary */
}
60
/* Arch resume hook: restart the local tick on the application processors. */
void xen_arch_resume(void)
{
	xen_timer_resume_on_aps();
}
diff --git a/arch/ia64/xen/time.c b/arch/ia64/xen/time.c
new file mode 100644
index 000000000000..d15a94c330fb
--- /dev/null
+++ b/arch/ia64/xen/time.c
@@ -0,0 +1,213 @@
1/******************************************************************************
2 * arch/ia64/xen/time.c
3 *
4 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#include <linux/delay.h>
24#include <linux/kernel_stat.h>
25#include <linux/posix-timers.h>
26#include <linux/irq.h>
27#include <linux/clocksource.h>
28
29#include <asm/timex.h>
30
31#include <asm/xen/hypervisor.h>
32
33#include <xen/interface/vcpu.h>
34
35#include "../kernel/fsyscall_gtod_data.h"
36
/* Per-CPU runstate area shared with the hypervisor, plus the amounts of
 * stolen and blocked time (ns) already folded into the accounting. */
DEFINE_PER_CPU(struct vcpu_runstate_info, runstate);
DEFINE_PER_CPU(unsigned long, processed_stolen_time);
DEFINE_PER_CPU(unsigned long, processed_blocked_time);
40
41/* taken from i386/kernel/time-xen.c */
static void xen_init_missing_ticks_accounting(int cpu)
{
	struct vcpu_register_runstate_memory_area area;
	struct vcpu_runstate_info *runstate = &per_cpu(runstate, cpu);
	int rc;

	memset(runstate, 0, sizeof(*runstate));

	/* ask the hypervisor to keep this vCPU's runstate mirrored
	 * into our per-CPU 'runstate' area */
	area.addr.v = runstate;
	rc = HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area, cpu,
				&area);
	/* -ENOSYS: hypervisor without this op; anything else is a bug */
	WARN_ON(rc && rc != -ENOSYS);

	/* baseline: everything reported so far counts as already processed */
	per_cpu(processed_blocked_time, cpu) = runstate->time[RUNSTATE_blocked];
	per_cpu(processed_stolen_time, cpu) = runstate->time[RUNSTATE_runnable]
					+ runstate->time[RUNSTATE_offline];
}
59
60/*
61 * Runstate accounting
62 */
63/* stolen from arch/x86/xen/time.c */
/* Copy this CPU's hypervisor-updated runstate record into *res. */
static void get_runstate_snapshot(struct vcpu_runstate_info *res)
{
	u64 state_time;
	struct vcpu_runstate_info *state;

	BUG_ON(preemptible());

	state = &__get_cpu_var(runstate);

	/*
	 * The runstate info is always updated by the hypervisor on
	 * the current CPU, so there's no need to use anything
	 * stronger than a compiler barrier when fetching it.
	 */
	do {
		/* seqlock-style retry: state_entry_time changes whenever
		 * the hypervisor rewrites the record */
		state_time = state->state_entry_time;
		rmb();
		*res = *state;
		rmb();
	} while (state->state_entry_time != state_time);
}
85
86#define NS_PER_TICK (1000000000LL/HZ)
87
88static unsigned long
89consider_steal_time(unsigned long new_itm)
90{
91 unsigned long stolen, blocked;
92 unsigned long delta_itm = 0, stolentick = 0;
93 int cpu = smp_processor_id();
94 struct vcpu_runstate_info runstate;
95 struct task_struct *p = current;
96
97 get_runstate_snapshot(&runstate);
98
99 /*
100 * Check for vcpu migration effect
101 * In this case, itc value is reversed.
102 * This causes huge stolen value.
103 * This function just checks and reject this effect.
104 */
105 if (!time_after_eq(runstate.time[RUNSTATE_blocked],
106 per_cpu(processed_blocked_time, cpu)))
107 blocked = 0;
108
109 if (!time_after_eq(runstate.time[RUNSTATE_runnable] +
110 runstate.time[RUNSTATE_offline],
111 per_cpu(processed_stolen_time, cpu)))
112 stolen = 0;
113
114 if (!time_after(delta_itm + new_itm, ia64_get_itc()))
115 stolentick = ia64_get_itc() - new_itm;
116
117 do_div(stolentick, NS_PER_TICK);
118 stolentick++;
119
120 do_div(stolen, NS_PER_TICK);
121
122 if (stolen > stolentick)
123 stolen = stolentick;
124
125 stolentick -= stolen;
126 do_div(blocked, NS_PER_TICK);
127
128 if (blocked > stolentick)
129 blocked = stolentick;
130
131 if (stolen > 0 || blocked > 0) {
132 account_steal_time(NULL, jiffies_to_cputime(stolen));
133 account_steal_time(idle_task(cpu), jiffies_to_cputime(blocked));
134 run_local_timers();
135
136 if (rcu_pending(cpu))
137 rcu_check_callbacks(cpu, user_mode(get_irq_regs()));
138
139 scheduler_tick();
140 run_posix_cpu_timers(p);
141 delta_itm += local_cpu_data->itm_delta * (stolen + blocked);
142
143 if (cpu == time_keeper_id) {
144 write_seqlock(&xtime_lock);
145 do_timer(stolen + blocked);
146 local_cpu_data->itm_next = delta_itm + new_itm;
147 write_sequnlock(&xtime_lock);
148 } else {
149 local_cpu_data->itm_next = delta_itm + new_itm;
150 }
151 per_cpu(processed_stolen_time, cpu) += NS_PER_TICK * stolen;
152 per_cpu(processed_blocked_time, cpu) += NS_PER_TICK * blocked;
153 }
154 return delta_itm;
155}
156
/* pv_time_ops.do_steal_accounting hook: fold in stolen/blocked time and
 * advance *new_itm past the skipped ticks.  Returns 1 when an adjustment
 * was made and the new itm is still in the future, 0 otherwise. */
static int xen_do_steal_accounting(unsigned long *new_itm)
{
	unsigned long delta_itm;
	delta_itm = consider_steal_time(*new_itm);
	*new_itm += delta_itm;
	if (time_after(*new_itm, ia64_get_itc()) && delta_itm)
		return 1;

	return 0;
}
167
/* Clocksource-resume hook: reset the ITC jitter bookkeeping. */
static void xen_itc_jitter_data_reset(void)
{
	u64 lcycle, ret;

	/* cmpxchg loop: clear itc_lastcycle atomically w.r.t. concurrent
	 * readers/updaters of the jitter data */
	do {
		lcycle = itc_jitter_data.itc_lastcycle;
		ret = cmpxchg(&itc_jitter_data.itc_lastcycle, lcycle, 0);
	} while (unlikely(ret != lcycle));
}
177
/* Time paravirt operations installed by xen_setup_pv_ops(). */
struct pv_time_ops xen_time_ops __initdata = {
	.init_missing_ticks_accounting	= xen_init_missing_ticks_accounting,
	.do_steal_accounting		= xen_do_steal_accounting,
	.clocksource_resume		= xen_itc_jitter_data_reset,
};
183
/* Called after suspend, to resume time. */
static void xen_local_tick_resume(void)
{
	/* Just trigger a tick. */
	ia64_cpu_local_tick();
	touch_softlockup_watchdog();
}
191
/* Resume timekeeping on the boot CPU after a domain restore. */
void
xen_timer_resume(void)
{
	unsigned int cpu;

	xen_local_tick_resume();

	/* re-register the runstate areas and re-baseline the accounting */
	for_each_online_cpu(cpu)
		xen_init_missing_ticks_accounting(cpu);
}
202
/* smp_call_function callback: resume the tick on the CPU it runs on. */
static void ia64_cpu_local_tick_fn(void *unused)
{
	xen_local_tick_resume();
	xen_init_missing_ticks_accounting(smp_processor_id());
}
208
/* Run the tick-resume path on all other online CPUs (the APs), waiting
 * for completion. */
void
xen_timer_resume_on_aps(void)
{
	smp_call_function(&ia64_cpu_local_tick_fn, NULL, 1);
}
diff --git a/arch/ia64/xen/time.h b/arch/ia64/xen/time.h
new file mode 100644
index 000000000000..f98d7e1a42f0
--- /dev/null
+++ b/arch/ia64/xen/time.h
@@ -0,0 +1,24 @@
1/******************************************************************************
2 * arch/ia64/xen/time.h
3 *
4 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
/* pv_time_ops instance installed by xen_setup_pv_ops(). */
extern struct pv_time_ops xen_time_ops __initdata;
/* Restart the local tick on the application processors after resume. */
void xen_timer_resume_on_aps(void);
diff --git a/arch/ia64/xen/xcom_hcall.c b/arch/ia64/xen/xcom_hcall.c
new file mode 100644
index 000000000000..ccaf7431f7c8
--- /dev/null
+++ b/arch/ia64/xen/xcom_hcall.c
@@ -0,0 +1,441 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License as published by
4 * the Free Software Foundation; either version 2 of the License, or
5 * (at your option) any later version.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software
14 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
15 *
16 * Tristan Gingold <tristan.gingold@bull.net>
17 *
18 * Copyright (c) 2007
19 * Isaku Yamahata <yamahata at valinux co jp>
20 * VA Linux Systems Japan K.K.
21 * consolidate mini and inline version.
22 */
23
24#include <linux/module.h>
25#include <xen/interface/xen.h>
26#include <xen/interface/memory.h>
27#include <xen/interface/grant_table.h>
28#include <xen/interface/callback.h>
29#include <xen/interface/vcpu.h>
30#include <asm/xen/hypervisor.h>
31#include <asm/xen/xencomm.h>
32
33/* Xencomm notes:
34 * This file defines hypercalls to be used by xencomm. The hypercalls simply
35 * create inlines or mini descriptors for pointers and then call the raw arch
36 * hypercall xencomm_arch_hypercall_XXX
37 *
38 * If the arch wants to directly use these hypercalls, simply define macros
39 * in asm/xen/hypercall.h, eg:
40 * #define HYPERVISOR_sched_op xencomm_hypercall_sched_op
41 *
42 * The arch may also define HYPERVISOR_xxx as a function and do more operations
43 * before/after doing the hypercall.
44 *
45 * Note: because only inline or mini descriptors are created these functions
46 * must only be called with in kernel memory parameters.
47 */
48
/* Console-io hypercall, with @str wrapped in an inline xencomm descriptor. */
int
xencomm_hypercall_console_io(int cmd, int count, char *str)
{
	/* xen early printk uses console io hypercall before
	 * xencomm initialization. In that case, we just ignore it.
	 */
	if (!xencomm_is_initialized())
		return 0;

	return xencomm_arch_hypercall_console_io
		(cmd, count, xencomm_map_no_alloc(str, count));
}
EXPORT_SYMBOL_GPL(xencomm_hypercall_console_io);
62
63int
64xencomm_hypercall_event_channel_op(int cmd, void *op)
65{
66 struct xencomm_handle *desc;
67 desc = xencomm_map_no_alloc(op, sizeof(struct evtchn_op));
68 if (desc == NULL)
69 return -EINVAL;
70
71 return xencomm_arch_hypercall_event_channel_op(cmd, desc);
72}
73EXPORT_SYMBOL_GPL(xencomm_hypercall_event_channel_op);
74
/* Version-query hypercall: size the argument by sub-command, wrap it in a
 * xencomm descriptor, and issue the raw hypercall. */
int
xencomm_hypercall_xen_version(int cmd, void *arg)
{
	struct xencomm_handle *desc;
	unsigned int argsize;

	switch (cmd) {
	case XENVER_version:
		/* do not actually pass an argument */
		return xencomm_arch_hypercall_xen_version(cmd, 0);
	case XENVER_extraversion:
		argsize = sizeof(struct xen_extraversion);
		break;
	case XENVER_compile_info:
		argsize = sizeof(struct xen_compile_info);
		break;
	case XENVER_capabilities:
		argsize = sizeof(struct xen_capabilities_info);
		break;
	case XENVER_changeset:
		argsize = sizeof(struct xen_changeset_info);
		break;
	case XENVER_platform_parameters:
		argsize = sizeof(struct xen_platform_parameters);
		break;
	case XENVER_get_features:
		/* NULL arg means "query the number of feature words" */
		argsize = (arg == NULL) ? 0 : sizeof(struct xen_feature_info);
		break;

	default:
		printk(KERN_DEBUG
		       "%s: unknown version op %d\n", __func__, cmd);
		return -ENOSYS;
	}

	desc = xencomm_map_no_alloc(arg, argsize);
	if (desc == NULL)
		return -EINVAL;

	return xencomm_arch_hypercall_xen_version(cmd, desc);
}
EXPORT_SYMBOL_GPL(xencomm_hypercall_xen_version);
117
/* Physdev hypercall: size the argument by sub-command and forward it
 * through an inline xencomm descriptor. */
int
xencomm_hypercall_physdev_op(int cmd, void *op)
{
	unsigned int argsize;

	switch (cmd) {
	case PHYSDEVOP_apic_read:
	case PHYSDEVOP_apic_write:
		argsize = sizeof(struct physdev_apic);
		break;
	case PHYSDEVOP_alloc_irq_vector:
	case PHYSDEVOP_free_irq_vector:
		argsize = sizeof(struct physdev_irq);
		break;
	case PHYSDEVOP_irq_status_query:
		argsize = sizeof(struct physdev_irq_status_query);
		break;

	default:
		printk(KERN_DEBUG
		       "%s: unknown physdev op %d\n", __func__, cmd);
		return -ENOSYS;
	}

	return xencomm_arch_hypercall_physdev_op
		(cmd, xencomm_map_no_alloc(op, argsize));
}
145
/* Build the xencomm descriptor(s) for one grant-table op.  Consumes mini
 * descriptors from *xc_area (advancing the pointer per descriptor used)
 * and returns the op descriptor in *desc.  For GNTTABOP_setup_table the
 * embedded frame_list guest handle is rewritten in place to point at its
 * own descriptor. */
static int
xencommize_grant_table_op(struct xencomm_mini **xc_area,
			  unsigned int cmd, void *op, unsigned int count,
			  struct xencomm_handle **desc)
{
	struct xencomm_handle *desc1;
	unsigned int argsize;

	switch (cmd) {
	case GNTTABOP_map_grant_ref:
		argsize = sizeof(struct gnttab_map_grant_ref);
		break;
	case GNTTABOP_unmap_grant_ref:
		argsize = sizeof(struct gnttab_unmap_grant_ref);
		break;
	case GNTTABOP_setup_table:
	{
		struct gnttab_setup_table *setup = op;

		argsize = sizeof(*setup);

		/* setup_table carries an embedded pointer, so only a
		 * single op can be marshalled */
		if (count != 1)
			return -EINVAL;
		desc1 = __xencomm_map_no_alloc
			(xen_guest_handle(setup->frame_list),
			 setup->nr_frames *
			 sizeof(*xen_guest_handle(setup->frame_list)),
			 *xc_area);
		if (desc1 == NULL)
			return -EINVAL;
		(*xc_area)++;
		set_xen_guest_handle(setup->frame_list, (void *)desc1);
		break;
	}
	case GNTTABOP_dump_table:
		argsize = sizeof(struct gnttab_dump_table);
		break;
	case GNTTABOP_transfer:
		argsize = sizeof(struct gnttab_transfer);
		break;
	case GNTTABOP_copy:
		argsize = sizeof(struct gnttab_copy);
		break;
	case GNTTABOP_query_size:
		argsize = sizeof(struct gnttab_query_size);
		break;
	default:
		printk(KERN_DEBUG "%s: unknown hypercall grant table op %d\n",
		       __func__, cmd);
		BUG();
	}

	/* descriptor for the op array itself */
	*desc = __xencomm_map_no_alloc(op, count * argsize, *xc_area);
	if (*desc == NULL)
		return -EINVAL;
	(*xc_area)++;

	return 0;
}
205
/* Grant-table hypercall: marshal @op via mini descriptors on the stack
 * (2 slots: op array + possible embedded frame_list) and issue it. */
int
xencomm_hypercall_grant_table_op(unsigned int cmd, void *op,
				 unsigned int count)
{
	int rc;
	struct xencomm_handle *desc;
	XENCOMM_MINI_ALIGNED(xc_area, 2);

	rc = xencommize_grant_table_op(&xc_area, cmd, op, count, &desc);
	if (rc)
		return rc;

	return xencomm_arch_hypercall_grant_table_op(cmd, desc, count);
}
EXPORT_SYMBOL_GPL(xencomm_hypercall_grant_table_op);
221
/* Scheduler hypercall: size the argument by sub-command; SCHEDOP_poll
 * additionally rewrites its embedded ports handle to a descriptor. */
int
xencomm_hypercall_sched_op(int cmd, void *arg)
{
	struct xencomm_handle *desc;
	unsigned int argsize;

	switch (cmd) {
	case SCHEDOP_yield:
	case SCHEDOP_block:
		argsize = 0;
		break;
	case SCHEDOP_shutdown:
		argsize = sizeof(struct sched_shutdown);
		break;
	case SCHEDOP_poll:
	{
		struct sched_poll *poll = arg;
		struct xencomm_handle *ports;

		argsize = sizeof(struct sched_poll);
		/* NOTE(review): only one port entry is mapped here —
		 * looks like poll->nr_ports is assumed to be 1; confirm. */
		ports = xencomm_map_no_alloc(xen_guest_handle(poll->ports),
				     sizeof(*xen_guest_handle(poll->ports)));

		set_xen_guest_handle(poll->ports, (void *)ports);
		break;
	}
	default:
		printk(KERN_DEBUG "%s: unknown sched op %d\n", __func__, cmd);
		return -ENOSYS;
	}

	desc = xencomm_map_no_alloc(arg, argsize);
	if (desc == NULL)
		return -EINVAL;

	return xencomm_arch_hypercall_sched_op(cmd, desc);
}
EXPORT_SYMBOL_GPL(xencomm_hypercall_sched_op);
260
/* Multicall hypercall: marshal each supported entry's argument pointer
 * into a mini descriptor, then wrap the whole entry array. */
int
xencomm_hypercall_multicall(void *call_list, int nr_calls)
{
	int rc;
	int i;
	struct multicall_entry *mce;
	struct xencomm_handle *desc;
	/* up to 2 mini descriptors per call (grant-table ops) */
	XENCOMM_MINI_ALIGNED(xc_area, nr_calls * 2);

	for (i = 0; i < nr_calls; i++) {
		mce = (struct multicall_entry *)call_list + i;

		switch (mce->op) {
		case __HYPERVISOR_update_va_mapping:
		case __HYPERVISOR_mmu_update:
			/* No-op on ia64. */
			break;
		case __HYPERVISOR_grant_table_op:
			rc = xencommize_grant_table_op
				(&xc_area,
				 mce->args[0], (void *)mce->args[1],
				 mce->args[2], &desc);
			if (rc)
				return rc;
			/* replace the raw pointer arg with its descriptor */
			mce->args[1] = (unsigned long)desc;
			break;
		case __HYPERVISOR_memory_op:
		default:
			printk(KERN_DEBUG
			       "%s: unhandled multicall op entry op %lu\n",
			       __func__, mce->op);
			return -ENOSYS;
		}
	}

	desc = xencomm_map_no_alloc(call_list,
				    nr_calls * sizeof(struct multicall_entry));
	if (desc == NULL)
		return -EINVAL;

	return xencomm_arch_hypercall_multicall(desc, nr_calls);
}
EXPORT_SYMBOL_GPL(xencomm_hypercall_multicall);
304
305int
306xencomm_hypercall_callback_op(int cmd, void *arg)
307{
308 unsigned int argsize;
309 switch (cmd) {
310 case CALLBACKOP_register:
311 argsize = sizeof(struct callback_register);
312 break;
313 case CALLBACKOP_unregister:
314 argsize = sizeof(struct callback_unregister);
315 break;
316 default:
317 printk(KERN_DEBUG
318 "%s: unknown callback op %d\n", __func__, cmd);
319 return -ENOSYS;
320 }
321
322 return xencomm_arch_hypercall_callback_op
323 (cmd, xencomm_map_no_alloc(arg, argsize));
324}
325
/* Rewrite the extent_start handle inside @mop to point at a mini xencomm
 * descriptor built in @xc_area.  Returns -EINVAL on mapping failure. */
static int
xencommize_memory_reservation(struct xencomm_mini *xc_area,
			      struct xen_memory_reservation *mop)
{
	struct xencomm_handle *desc;

	desc = __xencomm_map_no_alloc(xen_guest_handle(mop->extent_start),
			mop->nr_extents *
			sizeof(*xen_guest_handle(mop->extent_start)),
			xc_area);
	if (desc == NULL)
		return -EINVAL;

	set_xen_guest_handle(mop->extent_start, (void *)desc);
	return 0;
}
342
/* Memory-op hypercall.  Reservation ops carry an embedded extent_start
 * handle that is temporarily rewritten to a descriptor, saved in
 * extent_start_va, and restored after the hypercall returns. */
int
xencomm_hypercall_memory_op(unsigned int cmd, void *arg)
{
	GUEST_HANDLE(xen_pfn_t) extent_start_va[2] = { {NULL}, {NULL} };
	struct xen_memory_reservation *xmr = NULL;
	int rc;
	struct xencomm_handle *desc;
	unsigned int argsize;
	XENCOMM_MINI_ALIGNED(xc_area, 2);

	switch (cmd) {
	case XENMEM_increase_reservation:
	case XENMEM_decrease_reservation:
	case XENMEM_populate_physmap:
		xmr = (struct xen_memory_reservation *)arg;
		/* save the caller's extent_start for restoration below */
		set_xen_guest_handle(extent_start_va[0],
				     xen_guest_handle(xmr->extent_start));

		argsize = sizeof(*xmr);
		rc = xencommize_memory_reservation(xc_area, xmr);
		if (rc)
			return rc;
		xc_area++;
		break;

	case XENMEM_maximum_ram_page:
		argsize = 0;
		break;

	case XENMEM_add_to_physmap:
		argsize = sizeof(struct xen_add_to_physmap);
		break;

	default:
		printk(KERN_DEBUG "%s: unknown memory op %d\n", __func__, cmd);
		return -ENOSYS;
	}

	desc = xencomm_map_no_alloc(arg, argsize);
	if (desc == NULL)
		return -EINVAL;

	rc = xencomm_arch_hypercall_memory_op(cmd, desc);

	/* undo the in-place handle rewrite before returning to the caller */
	switch (cmd) {
	case XENMEM_increase_reservation:
	case XENMEM_decrease_reservation:
	case XENMEM_populate_physmap:
		set_xen_guest_handle(xmr->extent_start,
				     xen_guest_handle(extent_start_va[0]));
		break;
	}

	return rc;
}
EXPORT_SYMBOL_GPL(xencomm_hypercall_memory_op);
399
400int
401xencomm_hypercall_suspend(unsigned long srec)
402{
403 struct sched_shutdown arg;
404
405 arg.reason = SHUTDOWN_suspend;
406
407 return xencomm_arch_hypercall_sched_op(
408 SCHEDOP_shutdown, xencomm_map_no_alloc(&arg, sizeof(arg)));
409}
410
411long
412xencomm_hypercall_vcpu_op(int cmd, int cpu, void *arg)
413{
414 unsigned int argsize;
415 switch (cmd) {
416 case VCPUOP_register_runstate_memory_area: {
417 struct vcpu_register_runstate_memory_area *area =
418 (struct vcpu_register_runstate_memory_area *)arg;
419 argsize = sizeof(*arg);
420 set_xen_guest_handle(area->addr.h,
421 (void *)xencomm_map_no_alloc(area->addr.v,
422 sizeof(area->addr.v)));
423 break;
424 }
425
426 default:
427 printk(KERN_DEBUG "%s: unknown vcpu op %d\n", __func__, cmd);
428 return -ENOSYS;
429 }
430
431 return xencomm_arch_hypercall_vcpu_op(cmd, cpu,
432 xencomm_map_no_alloc(arg, argsize));
433}
434
/* ia64-specific opt_feature hypercall: wrap the feature struct and issue it. */
long
xencomm_hypercall_opt_feature(void *arg)
{
	return xencomm_arch_hypercall_opt_feature(
		xencomm_map_no_alloc(arg,
				     sizeof(struct xen_ia64_opt_feature)));
}
diff --git a/arch/ia64/xen/xen_pv_ops.c b/arch/ia64/xen/xen_pv_ops.c
new file mode 100644
index 000000000000..04cd12350455
--- /dev/null
+++ b/arch/ia64/xen/xen_pv_ops.c
@@ -0,0 +1,364 @@
1/******************************************************************************
2 * arch/ia64/xen/xen_pv_ops.c
3 *
4 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#include <linux/console.h>
24#include <linux/irq.h>
25#include <linux/kernel.h>
26#include <linux/pm.h>
27
28#include <asm/xen/hypervisor.h>
29#include <asm/xen/xencomm.h>
30#include <asm/xen/privop.h>
31
32#include "irq_xen.h"
33#include "time.h"
34
35/***************************************************************************
36 * general info
37 */
/* Paravirt identity info; kernel_rpl is refined at runtime by xen_info_init(). */
static struct pv_info xen_info __initdata = {
	.kernel_rpl = 2,	/* or 1: determined at runtime */
	.paravirt_enabled = 1,
	.name = "Xen/ia64",
};
43
/* Privilege-level field within the ar.rsc register. */
#define IA64_RSC_PL_SHIFT 2
#define IA64_RSC_PL_BIT_SIZE 2
#define IA64_RSC_PL_MASK \
	(((1UL << IA64_RSC_PL_BIT_SIZE) - 1) << IA64_RSC_PL_SHIFT)

/* Read the current privilege level out of ar.rsc and record it. */
static void __init
xen_info_init(void)
{
	/* Xenified Linux/ia64 may run on pl = 1 or 2.
	 * determine at run time. */
	unsigned long rsc = ia64_getreg(_IA64_REG_AR_RSC);
	unsigned int rpl = (rsc & IA64_RSC_PL_MASK) >> IA64_RSC_PL_SHIFT;
	xen_info.kernel_rpl = rpl;
}
58
59/***************************************************************************
60 * pv_init_ops
61 * initialization hooks.
62 */
63
/* Run with an unwound frame at panic time: record the stack pointer and
 * tell the hypervisor the domain crashed. */
static void
xen_panic_hypercall(struct unw_frame_info *info, void *arg)
{
	current->thread.ksp = (__u64)info->sw - 16;
	HYPERVISOR_shutdown(SHUTDOWN_crash);
	/* we're never actually going to get here... */
}
71
/* Panic-notifier callback: unwind to a stable frame, then crash via Xen. */
static int
xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	unw_init_running(xen_panic_hypercall, NULL);
	/* we're never actually going to get here... */
	return NOTIFY_DONE;
}
79
80static struct notifier_block xen_panic_block = {
81 xen_panic_event, NULL, 0 /* try to go last */
82};
83
/* pm_power_off hook: power the domain off through the hypervisor. */
static void xen_pm_power_off(void)
{
	local_irq_disable();
	HYPERVISOR_shutdown(SHUTDOWN_poweroff);
}
89
/* pv_init_ops.banner hook: print Xen/ia64 boot information. */
static void __init
xen_banner(void)
{
	printk(KERN_INFO
	       "Running on Xen! pl = %d start_info_pfn=0x%lx nr_pages=%ld "
	       "flags=0x%x\n",
	       xen_info.kernel_rpl,
	       HYPERVISOR_shared_info->arch.start_info_pfn,
	       xen_start_info->nr_pages, xen_start_info->flags);
}
100
/* Reserve the page holding the Xen start_info so the bootmem allocator
 * never hands it out.  Returns the number of regions filled in (1). */
static int __init
xen_reserve_memory(struct rsvd_region *region)
{
	region->start = (unsigned long)__va(
		(HYPERVISOR_shared_info->arch.start_info_pfn << PAGE_SHIFT));
	region->end   = region->start + PAGE_SIZE;
	return 1;
}
109
/* Early arch setup under Xen: locate start_info, bring up xencomm,
 * query features, and hook the panic/power-off paths. */
static void __init
xen_arch_setup_early(void)
{
	struct shared_info *s;
	BUG_ON(!xen_pv_domain());

	s = HYPERVISOR_shared_info;
	xen_start_info = __va(s->arch.start_info_pfn << PAGE_SHIFT);

	/* Must be done before any hypercall. */
	xencomm_initialize();

	xen_setup_features();
	/* Register a call for panic conditions. */
	atomic_notifier_chain_register(&panic_notifier_list,
				       &xen_panic_block);
	pm_power_off = xen_pm_power_off;

	xen_ia64_enable_opt_feature();
}
130
/* Register the preferred boot consoles for a Xen domain. */
static void __init
xen_arch_setup_console(char **cmdline_p)
{
	add_preferred_console("xenboot", 0, NULL);
	add_preferred_console("tty", 0, NULL);
	/* use hvc_xen */
	add_preferred_console("hvc", 0, NULL);

#if !defined(CONFIG_VT) || !defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = NULL;
#endif
}
143
/* MCA setup hook; returns 1 — NOTE(review): presumably "no MCA under
 * Xen"; confirm the return convention against the pv_init_ops caller. */
static int __init
xen_arch_setup_nomca(void)
{
	return 1;
}
149
/* After the boot CPU is prepared for SMP, place the vcpu_info areas. */
static void __init
xen_post_smp_prepare_boot_cpu(void)
{
	xen_setup_vcpu_info_placement();
}
155
/* Initialization paravirt hooks installed by xen_setup_pv_ops(). */
static const struct pv_init_ops xen_init_ops __initdata = {
	.banner = xen_banner,

	.reserve_memory = xen_reserve_memory,

	.arch_setup_early = xen_arch_setup_early,
	.arch_setup_console = xen_arch_setup_console,
	.arch_setup_nomca = xen_arch_setup_nomca,

	.post_smp_prepare_boot_cpu = xen_post_smp_prepare_boot_cpu,
};
167
168/***************************************************************************
169 * pv_cpu_ops
170 * intrinsics hooks.
171 */
172
/* setreg intrinsic hook: route paravirtualized registers through the
 * hypervisor, everything else to the native setreg. */
static void xen_setreg(int regnum, unsigned long val)
{
	switch (regnum) {
	case _IA64_REG_AR_KR0 ... _IA64_REG_AR_KR7:
		xen_set_kr(regnum - _IA64_REG_AR_KR0, val);
		break;
#ifdef CONFIG_IA32_SUPPORT
	case _IA64_REG_AR_EFLAG:
		xen_set_eflag(val);
		break;
#endif
	case _IA64_REG_CR_TPR:
		xen_set_tpr(val);
		break;
	case _IA64_REG_CR_ITM:
		xen_set_itm(val);
		break;
	case _IA64_REG_CR_EOI:
		xen_eoi(val);
		break;
	default:
		/* unvirtualized register: fall back to the native path */
		ia64_native_setreg_func(regnum, val);
		break;
	}
}
198
/* getreg intrinsic hook: read paravirtualized registers from the
 * hypervisor, everything else via the native getreg. */
static unsigned long xen_getreg(int regnum)
{
	unsigned long res;

	switch (regnum) {
	case _IA64_REG_PSR:
		res = xen_get_psr();
		break;
#ifdef CONFIG_IA32_SUPPORT
	case _IA64_REG_AR_EFLAG:
		res = xen_get_eflag();
		break;
#endif
	case _IA64_REG_CR_IVR:
		res = xen_get_ivr();
		break;
	case _IA64_REG_CR_TPR:
		res = xen_get_tpr();
		break;
	default:
		/* unvirtualized register: fall back to the native path */
		res = ia64_native_getreg_func(regnum);
		break;
	}
	return res;
}
224
/* turning on interrupts is a bit more complicated.. write to the
 * memory-mapped virtual psr.i bit first (to avoid race condition),
 * then if any interrupts were pending, we have to execute a hyperprivop
 * to ensure the pending interrupt gets delivered; else we're done! */
static void
xen_ssm_i(void)
{
	int old = xen_get_virtual_psr_i();
	xen_set_virtual_psr_i(1);
	barrier();	/* order the psr.i write before the pending check */
	if (!old && xen_get_virtual_pend())
		xen_hyper_ssm_i();
}
238
/* turning off interrupts can be paravirtualized simply by writing
 * to a memory-mapped virtual psr.i bit (implemented as a 16-bit bool) */
static void
xen_rsm_i(void)
{
	xen_set_virtual_psr_i(0);
	barrier();	/* make the disable visible before returning */
}
247
248static unsigned long
249xen_get_psr_i(void)
250{
251 return xen_get_virtual_psr_i() ? IA64_PSR_I : 0;
252}
253
254static void
255xen_intrin_local_irq_restore(unsigned long mask)
256{
257 if (mask & IA64_PSR_I)
258 xen_ssm_i();
259 else
260 xen_rsm_i();
261}
262
/* CPU intrinsic paravirt hooks installed by xen_setup_pv_ops(). */
static const struct pv_cpu_ops xen_cpu_ops __initdata = {
	.fc		= xen_fc,
	.thash		= xen_thash,
	.get_cpuid	= xen_get_cpuid,
	.get_pmd	= xen_get_pmd,
	.getreg		= xen_getreg,
	.setreg		= xen_setreg,
	.ptcga		= xen_ptcga,
	.get_rr		= xen_get_rr,
	.set_rr		= xen_set_rr,
	.set_rr0_to_rr4	= xen_set_rr0_to_rr4,
	.ssm_i		= xen_ssm_i,
	.rsm_i		= xen_rsm_i,
	.get_psr_i	= xen_get_psr_i,
	.intrin_local_irq_restore
			= xen_intrin_local_irq_restore,
};
280
281/******************************************************************************
282 * replacement of hand written assembly codes.
283 */
284
/* Assembly entry points that replace the native hand-written versions. */
extern char xen_switch_to;
extern char xen_leave_syscall;
extern char xen_work_processed_syscall;
extern char xen_leave_kernel;

/* Table handed to paravirt_cpu_asm_init() to patch the asm switch points. */
const struct pv_cpu_asm_switch xen_cpu_asm_switch = {
	.switch_to		= (unsigned long)&xen_switch_to,
	.leave_syscall		= (unsigned long)&xen_leave_syscall,
	.work_processed_syscall	= (unsigned long)&xen_work_processed_syscall,
	.leave_kernel		= (unsigned long)&xen_leave_kernel,
};
296
297/***************************************************************************
298 * pv_iosapic_ops
299 * iosapic read/write hooks.
300 */
/* PC-AT compatibility init is unnecessary under Xen. */
static void
xen_pcat_compat_init(void)
{
	/* nothing */
}
306
/* No special irq_chip for IOSAPIC lines under Xen; NULL lets the caller
 * keep its default. */
static struct irq_chip*
xen_iosapic_get_irq_chip(unsigned long trigger)
{
	return NULL;
}
312
/* Read an IOSAPIC register through the PHYSDEVOP_apic_read hypercall.
 * On hypercall failure the (nonzero) error code is returned instead of
 * a register value. */
static unsigned int
xen_iosapic_read(char __iomem *iosapic, unsigned int reg)
{
	struct physdev_apic apic_op;
	int ret;

	/* the hypervisor wants the physical base, not the uncached va */
	apic_op.apic_physbase = (unsigned long)iosapic -
					__IA64_UNCACHED_OFFSET;
	apic_op.reg = reg;
	ret = HYPERVISOR_physdev_op(PHYSDEVOP_apic_read, &apic_op);
	if (ret)
		return ret;
	return apic_op.value;
}
327
/* Write an IOSAPIC register through the PHYSDEVOP_apic_write hypercall.
 * The hypercall result is deliberately discarded (void interface). */
static void
xen_iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val)
{
	struct physdev_apic apic_op;

	apic_op.apic_physbase = (unsigned long)iosapic -
					__IA64_UNCACHED_OFFSET;
	apic_op.reg = reg;
	apic_op.value = val;
	HYPERVISOR_physdev_op(PHYSDEVOP_apic_write, &apic_op);
}
339
/* IOSAPIC paravirt hooks installed by xen_setup_pv_ops(). */
static const struct pv_iosapic_ops xen_iosapic_ops __initdata = {
	.pcat_compat_init = xen_pcat_compat_init,
	.__get_irq_chip = xen_iosapic_get_irq_chip,

	.__read = xen_iosapic_read,
	.__write = xen_iosapic_write,
};
347
348/***************************************************************************
349 * pv_ops initialization
350 */
351
/* Install every Xen pv_ops table and patch the assembly switch points.
 * Called once during early boot when running as a Xen guest. */
void __init
xen_setup_pv_ops(void)
{
	xen_info_init();
	pv_info = xen_info;
	pv_init_ops = xen_init_ops;
	pv_cpu_ops = xen_cpu_ops;
	pv_iosapic_ops = xen_iosapic_ops;
	pv_irq_ops = xen_irq_ops;
	pv_time_ops = xen_time_ops;

	paravirt_cpu_asm_init(&xen_cpu_asm_switch);
}
diff --git a/arch/ia64/xen/xencomm.c b/arch/ia64/xen/xencomm.c
new file mode 100644
index 000000000000..1f5d7ac82e97
--- /dev/null
+++ b/arch/ia64/xen/xencomm.c
@@ -0,0 +1,105 @@
1/*
2 * Copyright (C) 2006 Hollis Blanchard <hollisb@us.ibm.com>, IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18
19#include <linux/mm.h>
20
21static unsigned long kernel_virtual_offset;
22static int is_xencomm_initialized;
23
24/* for xen early printk. It uses console io hypercall which uses xencomm.
25 * However early printk may use it before xencomm initialization.
26 */
27int
28xencomm_is_initialized(void)
29{
30 return is_xencomm_initialized;
31}
32
33void
34xencomm_initialize(void)
35{
36 kernel_virtual_offset = KERNEL_START - ia64_tpa(KERNEL_START);
37 is_xencomm_initialized = 1;
38}
39
40/* Translate virtual address to physical address. */
41unsigned long
42xencomm_vtop(unsigned long vaddr)
43{
44 struct page *page;
45 struct vm_area_struct *vma;
46
47 if (vaddr == 0)
48 return 0UL;
49
50 if (REGION_NUMBER(vaddr) == 5) {
51 pgd_t *pgd;
52 pud_t *pud;
53 pmd_t *pmd;
54 pte_t *ptep;
55
56 /* On ia64, TASK_SIZE refers to current. It is not initialized
57 during boot.
58 Furthermore the kernel is relocatable and __pa() doesn't
59 work on addresses. */
60 if (vaddr >= KERNEL_START
61 && vaddr < (KERNEL_START + KERNEL_TR_PAGE_SIZE))
62 return vaddr - kernel_virtual_offset;
63
64 /* In kernel area -- virtually mapped. */
65 pgd = pgd_offset_k(vaddr);
66 if (pgd_none(*pgd) || pgd_bad(*pgd))
67 return ~0UL;
68
69 pud = pud_offset(pgd, vaddr);
70 if (pud_none(*pud) || pud_bad(*pud))
71 return ~0UL;
72
73 pmd = pmd_offset(pud, vaddr);
74 if (pmd_none(*pmd) || pmd_bad(*pmd))
75 return ~0UL;
76
77 ptep = pte_offset_kernel(pmd, vaddr);
78 if (!ptep)
79 return ~0UL;
80
81 return (pte_val(*ptep) & _PFN_MASK) | (vaddr & ~PAGE_MASK);
82 }
83
84 if (vaddr > TASK_SIZE) {
85 /* percpu variables */
86 if (REGION_NUMBER(vaddr) == 7 &&
87 REGION_OFFSET(vaddr) >= (1ULL << IA64_MAX_PHYS_BITS))
 88 return ia64_tpa(vaddr);
89
90 /* kernel address */
91 return __pa(vaddr);
92 }
93
94 /* XXX double-check (lack of) locking */
95 vma = find_extend_vma(current->mm, vaddr);
96 if (!vma)
97 return ~0UL;
98
99 /* We assume the page is modified. */
100 page = follow_page(vma, vaddr, FOLL_WRITE | FOLL_TOUCH);
101 if (!page)
102 return ~0UL;
103
104 return (page_to_pfn(page) << PAGE_SHIFT) | (vaddr & ~PAGE_MASK);
105}
diff --git a/arch/ia64/xen/xenivt.S b/arch/ia64/xen/xenivt.S
new file mode 100644
index 000000000000..3e71d50584d9
--- /dev/null
+++ b/arch/ia64/xen/xenivt.S
@@ -0,0 +1,52 @@
1/*
2 * arch/ia64/xen/ivt.S
3 *
4 * Copyright (C) 2005 Hewlett-Packard Co
5 * Dan Magenheimer <dan.magenheimer@hp.com>
6 *
7 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
8 * VA Linux Systems Japan K.K.
9 * pv_ops.
10 */
11
12#include <asm/asmmacro.h>
13#include <asm/kregs.h>
14#include <asm/pgtable.h>
15
16#include "../kernel/minstate.h"
17
18 .section .text,"ax"
19GLOBAL_ENTRY(xen_event_callback)
20 mov r31=pr // prepare to save predicates
21 ;;
22 SAVE_MIN_WITH_COVER // uses r31; defines r2 and r3
23 ;;
24 movl r3=XSI_PSR_IC
25 mov r14=1
26 ;;
27 st4 [r3]=r14
28 ;;
29 adds r3=8,r2 // set up second base pointer for SAVE_REST
30 srlz.i // ensure everybody knows psr.ic is back on
31 ;;
32 SAVE_REST
33 ;;
341:
35 alloc r14=ar.pfs,0,0,1,0 // must be first in an insn group
36 add out0=16,sp // pass pointer to pt_regs as first arg
37 ;;
38 br.call.sptk.many b0=xen_evtchn_do_upcall
39 ;;
40 movl r20=XSI_PSR_I_ADDR
41 ;;
42 ld8 r20=[r20]
43 ;;
44 adds r20=-1,r20 // vcpu_info->evtchn_upcall_pending
45 ;;
46 ld1 r20=[r20]
47 ;;
48 cmp.ne p6,p0=r20,r0 // if there are pending events,
49 (p6) br.spnt.few 1b // call evtchn_do_upcall again.
50 br.sptk.many xen_leave_kernel // we know ia64_leave_kernel is
51 // paravirtualized as xen_leave_kernel
52END(xen_event_callback)
diff --git a/arch/ia64/xen/xensetup.S b/arch/ia64/xen/xensetup.S
new file mode 100644
index 000000000000..28fed1fcc079
--- /dev/null
+++ b/arch/ia64/xen/xensetup.S
@@ -0,0 +1,83 @@
1/*
2 * Support routines for Xen
3 *
4 * Copyright (C) 2005 Dan Magenheimer <dan.magenheimer@hp.com>
5 */
6
7#include <asm/processor.h>
8#include <asm/asmmacro.h>
9#include <asm/pgtable.h>
10#include <asm/system.h>
11#include <asm/paravirt.h>
12#include <asm/xen/privop.h>
13#include <linux/elfnote.h>
14#include <linux/init.h>
15#include <xen/interface/elfnote.h>
16
17 .section .data.read_mostly
18 .align 8
19 .global xen_domain_type
20xen_domain_type:
21 data4 XEN_NATIVE_ASM
22 .previous
23
24 __INIT
25ENTRY(startup_xen)
26 // Calculate load offset.
27 // The constant, LOAD_OFFSET, can't be used because the boot
28 // loader doesn't always load to the LMA specified by the vmlinux.lds.
29 mov r9=ip // must be the first instruction to make sure
30 // that r9 = the physical address of startup_xen.
31 // Usually r9 = startup_xen - LOAD_OFFSET
32 movl r8=startup_xen
33 ;;
34 sub r9=r9,r8 // Usually r9 = -LOAD_OFFSET.
35
36 mov r10=PARAVIRT_HYPERVISOR_TYPE_XEN
37 movl r11=_start
38 ;;
39 add r11=r11,r9
40 movl r8=hypervisor_type
41 ;;
42 add r8=r8,r9
43 mov b0=r11
44 ;;
45 st8 [r8]=r10
46 br.cond.sptk.many b0
47 ;;
48END(startup_xen)
49
50 ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS, .asciz "linux")
51 ELFNOTE(Xen, XEN_ELFNOTE_GUEST_VERSION, .asciz "2.6")
52 ELFNOTE(Xen, XEN_ELFNOTE_XEN_VERSION, .asciz "xen-3.0")
53 ELFNOTE(Xen, XEN_ELFNOTE_ENTRY, data8.ua startup_xen - LOAD_OFFSET)
54
55#define isBP p3 // are we the Bootstrap Processor?
56
57 .text
58
59GLOBAL_ENTRY(xen_setup_hook)
60 mov r8=XEN_PV_DOMAIN_ASM
61(isBP) movl r9=xen_domain_type;;
62(isBP) st4 [r9]=r8
63 movl r10=xen_ivt;;
64
65 mov cr.iva=r10
66
67 /* Set xsi base. */
68#define FW_HYPERCALL_SET_SHARED_INFO_VA 0x600
69(isBP) mov r2=FW_HYPERCALL_SET_SHARED_INFO_VA
70(isBP) movl r28=XSI_BASE;;
71(isBP) break 0x1000;;
72
73 /* setup pv_ops */
74(isBP) mov r4=rp
75 ;;
76(isBP) br.call.sptk.many rp=xen_setup_pv_ops
77 ;;
78(isBP) mov rp=r4
79 ;;
80
81 br.ret.sptk.many rp
82 ;;
83END(xen_setup_hook)
diff --git a/arch/m32r/oprofile/init.c b/arch/m32r/oprofile/init.c
index b7773e45c43f..fa56860f4258 100644
--- a/arch/m32r/oprofile/init.c
+++ b/arch/m32r/oprofile/init.c
@@ -12,7 +12,7 @@
12#include <linux/errno.h> 12#include <linux/errno.h>
13#include <linux/init.h> 13#include <linux/init.h>
14 14
15int __init oprofile_arch_init(struct oprofile_operations * ops) 15int __init oprofile_arch_init(struct oprofile_operations *ops)
16{ 16{
17 return -ENODEV; 17 return -ENODEV;
18} 18}
diff --git a/arch/mips/oprofile/common.c b/arch/mips/oprofile/common.c
index dd2fbd6645c1..3bf3354547f6 100644
--- a/arch/mips/oprofile/common.c
+++ b/arch/mips/oprofile/common.c
@@ -32,7 +32,7 @@ static int op_mips_setup(void)
32 return 0; 32 return 0;
33} 33}
34 34
35static int op_mips_create_files(struct super_block * sb, struct dentry * root) 35static int op_mips_create_files(struct super_block *sb, struct dentry *root)
36{ 36{
37 int i; 37 int i;
38 38
diff --git a/arch/mips/oprofile/op_impl.h b/arch/mips/oprofile/op_impl.h
index 2bfc17c30106..f04b54fb37d1 100644
--- a/arch/mips/oprofile/op_impl.h
+++ b/arch/mips/oprofile/op_impl.h
@@ -27,7 +27,7 @@ struct op_counter_config {
27/* Per-architecture configury and hooks. */ 27/* Per-architecture configury and hooks. */
28struct op_mips_model { 28struct op_mips_model {
29 void (*reg_setup) (struct op_counter_config *); 29 void (*reg_setup) (struct op_counter_config *);
30 void (*cpu_setup) (void * dummy); 30 void (*cpu_setup) (void *dummy);
31 int (*init)(void); 31 int (*init)(void);
32 void (*exit)(void); 32 void (*exit)(void);
33 void (*cpu_start)(void *args); 33 void (*cpu_start)(void *args);
diff --git a/arch/mips/oprofile/op_model_rm9000.c b/arch/mips/oprofile/op_model_rm9000.c
index a45d3202894f..3aa81384966d 100644
--- a/arch/mips/oprofile/op_model_rm9000.c
+++ b/arch/mips/oprofile/op_model_rm9000.c
@@ -80,7 +80,7 @@ static void rm9000_cpu_stop(void *args)
80 write_c0_perfcontrol(0); 80 write_c0_perfcontrol(0);
81} 81}
82 82
83static irqreturn_t rm9000_perfcount_handler(int irq, void * dev_id) 83static irqreturn_t rm9000_perfcount_handler(int irq, void *dev_id)
84{ 84{
85 unsigned int control = read_c0_perfcontrol(); 85 unsigned int control = read_c0_perfcontrol();
86 struct pt_regs *regs = get_irq_regs(); 86 struct pt_regs *regs = get_irq_regs();
diff --git a/arch/parisc/oprofile/init.c b/arch/parisc/oprofile/init.c
index 113f5139f551..026cba2af07a 100644
--- a/arch/parisc/oprofile/init.c
+++ b/arch/parisc/oprofile/init.c
@@ -12,7 +12,7 @@
12#include <linux/kernel.h> 12#include <linux/kernel.h>
13#include <linux/oprofile.h> 13#include <linux/oprofile.h>
14 14
15int __init oprofile_arch_init(struct oprofile_operations * ops) 15int __init oprofile_arch_init(struct oprofile_operations *ops)
16{ 16{
17 return -ENODEV; 17 return -ENODEV;
18} 18}
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 9391199d9e77..5b1527883fcb 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -19,9 +19,6 @@ config WORD_SIZE
19 default 64 if PPC64 19 default 64 if PPC64
20 default 32 if !PPC64 20 default 32 if !PPC64
21 21
22config PPC_MERGE
23 def_bool y
24
25config ARCH_PHYS_ADDR_T_64BIT 22config ARCH_PHYS_ADDR_T_64BIT
26 def_bool PPC64 || PHYS_64BIT 23 def_bool PPC64 || PHYS_64BIT
27 24
@@ -326,13 +323,11 @@ config KEXEC
326 323
327config CRASH_DUMP 324config CRASH_DUMP
328 bool "Build a kdump crash kernel" 325 bool "Build a kdump crash kernel"
329 depends on PPC_MULTIPLATFORM && PPC64 326 depends on PPC_MULTIPLATFORM && PPC64 && RELOCATABLE
330 help 327 help
331 Build a kernel suitable for use as a kdump capture kernel. 328 Build a kernel suitable for use as a kdump capture kernel.
332 The kernel will be linked at a different address than normal, and 329 The same kernel binary can be used as production kernel and dump
333 so can only be used for Kdump. 330 capture kernel.
334
335 Don't change this unless you know what you are doing.
336 331
337config PHYP_DUMP 332config PHYP_DUMP
338 bool "Hypervisor-assisted dump (EXPERIMENTAL)" 333 bool "Hypervisor-assisted dump (EXPERIMENTAL)"
@@ -832,11 +827,9 @@ config PAGE_OFFSET
832 default "0xc000000000000000" 827 default "0xc000000000000000"
833config KERNEL_START 828config KERNEL_START
834 hex 829 hex
835 default "0xc000000002000000" if CRASH_DUMP
836 default "0xc000000000000000" 830 default "0xc000000000000000"
837config PHYSICAL_START 831config PHYSICAL_START
838 hex 832 hex
839 default "0x02000000" if CRASH_DUMP
840 default "0x00000000" 833 default "0x00000000"
841endif 834endif
842 835
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index aac1406ccba5..8fc6d72849ae 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -68,7 +68,8 @@ src-plat := of.c cuboot-52xx.c cuboot-824x.c cuboot-83xx.c cuboot-85xx.c holly.c
68 fixed-head.S ep88xc.c ep405.c cuboot-c2k.c \ 68 fixed-head.S ep88xc.c ep405.c cuboot-c2k.c \
69 cuboot-katmai.c cuboot-rainier.c redboot-8xx.c ep8248e.c \ 69 cuboot-katmai.c cuboot-rainier.c redboot-8xx.c ep8248e.c \
70 cuboot-warp.c cuboot-85xx-cpm2.c cuboot-yosemite.c simpleboot.c \ 70 cuboot-warp.c cuboot-85xx-cpm2.c cuboot-yosemite.c simpleboot.c \
71 virtex405-head.S virtex.c redboot-83xx.c cuboot-sam440ep.c 71 virtex405-head.S virtex.c redboot-83xx.c cuboot-sam440ep.c \
72 cuboot-acadia.c
72src-boot := $(src-wlib) $(src-plat) empty.c 73src-boot := $(src-wlib) $(src-plat) empty.c
73 74
74src-boot := $(addprefix $(obj)/, $(src-boot)) 75src-boot := $(addprefix $(obj)/, $(src-boot))
@@ -211,6 +212,7 @@ image-$(CONFIG_DEFAULT_UIMAGE) += uImage
211# Board ports in arch/powerpc/platform/40x/Kconfig 212# Board ports in arch/powerpc/platform/40x/Kconfig
212image-$(CONFIG_EP405) += dtbImage.ep405 213image-$(CONFIG_EP405) += dtbImage.ep405
213image-$(CONFIG_WALNUT) += treeImage.walnut 214image-$(CONFIG_WALNUT) += treeImage.walnut
215image-$(CONFIG_ACADIA) += cuImage.acadia
214 216
215# Board ports in arch/powerpc/platform/44x/Kconfig 217# Board ports in arch/powerpc/platform/44x/Kconfig
216image-$(CONFIG_EBONY) += treeImage.ebony cuImage.ebony 218image-$(CONFIG_EBONY) += treeImage.ebony cuImage.ebony
@@ -319,6 +321,9 @@ $(obj)/zImage.iseries: vmlinux
319$(obj)/uImage: vmlinux $(wrapperbits) 321$(obj)/uImage: vmlinux $(wrapperbits)
320 $(call if_changed,wrap,uboot) 322 $(call if_changed,wrap,uboot)
321 323
324$(obj)/cuImage.initrd.%: vmlinux $(obj)/%.dtb $(wrapperbits)
325 $(call if_changed,wrap,cuboot-$*,,$(obj)/$*.dtb,$(obj)/ramdisk.image.gz)
326
322$(obj)/cuImage.%: vmlinux $(obj)/%.dtb $(wrapperbits) 327$(obj)/cuImage.%: vmlinux $(obj)/%.dtb $(wrapperbits)
323 $(call if_changed,wrap,cuboot-$*,,$(obj)/$*.dtb) 328 $(call if_changed,wrap,cuboot-$*,,$(obj)/$*.dtb)
324 329
diff --git a/arch/powerpc/boot/addnote.c b/arch/powerpc/boot/addnote.c
index dcc9ab2ca823..3091d1d21aef 100644
--- a/arch/powerpc/boot/addnote.c
+++ b/arch/powerpc/boot/addnote.c
@@ -11,7 +11,7 @@
11 * as published by the Free Software Foundation; either version 11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version. 12 * 2 of the License, or (at your option) any later version.
13 * 13 *
14 * Usage: addnote zImage [note.elf] 14 * Usage: addnote [-r realbase] zImage [note.elf]
15 * 15 *
16 * If note.elf is supplied, it is the name of an ELF file that contains 16 * If note.elf is supplied, it is the name of an ELF file that contains
17 * an RPA note to use instead of the built-in one. Alternatively, the 17 * an RPA note to use instead of the built-in one. Alternatively, the
@@ -153,18 +153,31 @@ unsigned char *read_rpanote(const char *fname, int *nnp)
153int 153int
154main(int ac, char **av) 154main(int ac, char **av)
155{ 155{
156 int fd, n, i; 156 int fd, n, i, ai;
157 int ph, ps, np; 157 int ph, ps, np;
158 int nnote, nnote2, ns; 158 int nnote, nnote2, ns;
159 unsigned char *rpap; 159 unsigned char *rpap;
160 160 char *p, *endp;
161 if (ac != 2 && ac != 3) { 161
162 fprintf(stderr, "Usage: %s elf-file [rpanote.elf]\n", av[0]); 162 ai = 1;
163 if (ac >= ai + 2 && strcmp(av[ai], "-r") == 0) {
164 /* process -r realbase */
165 p = av[ai + 1];
166 descr[1] = strtol(p, &endp, 16);
167 if (endp == p || *endp != 0) {
168 fprintf(stderr, "Can't parse -r argument '%s' as hex\n",
169 p);
170 exit(1);
171 }
172 ai += 2;
173 }
174 if (ac != ai + 1 && ac != ai + 2) {
175 fprintf(stderr, "Usage: %s [-r realbase] elf-file [rpanote.elf]\n", av[0]);
163 exit(1); 176 exit(1);
164 } 177 }
165 fd = open(av[1], O_RDWR); 178 fd = open(av[ai], O_RDWR);
166 if (fd < 0) { 179 if (fd < 0) {
167 perror(av[1]); 180 perror(av[ai]);
168 exit(1); 181 exit(1);
169 } 182 }
170 183
@@ -184,12 +197,12 @@ main(int ac, char **av)
184 if (buf[E_IDENT+EI_CLASS] != ELFCLASS32 197 if (buf[E_IDENT+EI_CLASS] != ELFCLASS32
185 || buf[E_IDENT+EI_DATA] != ELFDATA2MSB) { 198 || buf[E_IDENT+EI_DATA] != ELFDATA2MSB) {
186 fprintf(stderr, "%s is not a big-endian 32-bit ELF image\n", 199 fprintf(stderr, "%s is not a big-endian 32-bit ELF image\n",
187 av[1]); 200 av[ai]);
188 exit(1); 201 exit(1);
189 } 202 }
190 203
191 if (ac == 3) 204 if (ac == ai + 2)
192 rpap = read_rpanote(av[2], &nnote2); 205 rpap = read_rpanote(av[ai + 1], &nnote2);
193 206
194 ph = GET_32BE(buf, E_PHOFF); 207 ph = GET_32BE(buf, E_PHOFF);
195 ps = GET_16BE(buf, E_PHENTSIZE); 208 ps = GET_16BE(buf, E_PHENTSIZE);
@@ -202,7 +215,7 @@ main(int ac, char **av)
202 for (i = 0; i < np; ++i) { 215 for (i = 0; i < np; ++i) {
203 if (GET_32BE(buf, ph + PH_TYPE) == PT_NOTE) { 216 if (GET_32BE(buf, ph + PH_TYPE) == PT_NOTE) {
204 fprintf(stderr, "%s already has a note entry\n", 217 fprintf(stderr, "%s already has a note entry\n",
205 av[1]); 218 av[ai]);
206 exit(0); 219 exit(0);
207 } 220 }
208 ph += ps; 221 ph += ps;
@@ -260,18 +273,18 @@ main(int ac, char **av)
260 exit(1); 273 exit(1);
261 } 274 }
262 if (i < n) { 275 if (i < n) {
263 fprintf(stderr, "%s: write truncated\n", av[1]); 276 fprintf(stderr, "%s: write truncated\n", av[ai]);
264 exit(1); 277 exit(1);
265 } 278 }
266 279
267 exit(0); 280 exit(0);
268 281
269 notelf: 282 notelf:
270 fprintf(stderr, "%s does not appear to be an ELF file\n", av[1]); 283 fprintf(stderr, "%s does not appear to be an ELF file\n", av[ai]);
271 exit(1); 284 exit(1);
272 285
273 nospace: 286 nospace:
274 fprintf(stderr, "sorry, I can't find space in %s to put the note\n", 287 fprintf(stderr, "sorry, I can't find space in %s to put the note\n",
275 av[1]); 288 av[ai]);
276 exit(1); 289 exit(1);
277} 290}
diff --git a/arch/powerpc/boot/cuboot-52xx.c b/arch/powerpc/boot/cuboot-52xx.c
index a8611546a656..4c42ec8687be 100644
--- a/arch/powerpc/boot/cuboot-52xx.c
+++ b/arch/powerpc/boot/cuboot-52xx.c
@@ -37,6 +37,10 @@ static void platform_fixups(void)
37 * this can do a simple path lookup. 37 * this can do a simple path lookup.
38 */ 38 */
39 soc = find_node_by_devtype(NULL, "soc"); 39 soc = find_node_by_devtype(NULL, "soc");
40 if (!soc)
41 soc = find_node_by_compatible(NULL, "fsl,mpc5200-immr");
42 if (!soc)
43 soc = find_node_by_compatible(NULL, "fsl,mpc5200b-immr");
40 if (soc) { 44 if (soc) {
41 setprop(soc, "bus-frequency", &bd.bi_ipbfreq, 45 setprop(soc, "bus-frequency", &bd.bi_ipbfreq,
42 sizeof(bd.bi_ipbfreq)); 46 sizeof(bd.bi_ipbfreq));
diff --git a/arch/powerpc/boot/cuboot-acadia.c b/arch/powerpc/boot/cuboot-acadia.c
new file mode 100644
index 000000000000..0634aba6348a
--- /dev/null
+++ b/arch/powerpc/boot/cuboot-acadia.c
@@ -0,0 +1,174 @@
1/*
2 * Old U-boot compatibility for Acadia
3 *
4 * Author: Josh Boyer <jwboyer@linux.vnet.ibm.com>
5 *
6 * Copyright 2008 IBM Corporation
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License version 2 as published
10 * by the Free Software Foundation.
11 */
12
13#include "ops.h"
14#include "io.h"
15#include "dcr.h"
16#include "stdio.h"
17#include "4xx.h"
18#include "44x.h"
19#include "cuboot.h"
20
21#define TARGET_4xx
22#include "ppcboot.h"
23
24static bd_t bd;
25
26#define CPR_PERD0_SPIDV_MASK 0x000F0000 /* SPI Clock Divider */
27
28#define PLLC_SRC_MASK 0x20000000 /* PLL feedback source */
29
30#define PLLD_FBDV_MASK 0x1F000000 /* PLL feedback divider value */
31#define PLLD_FWDVA_MASK 0x000F0000 /* PLL forward divider A value */
32#define PLLD_FWDVB_MASK 0x00000700 /* PLL forward divider B value */
33
34#define PRIMAD_CPUDV_MASK 0x0F000000 /* CPU Clock Divisor Mask */
35#define PRIMAD_PLBDV_MASK 0x000F0000 /* PLB Clock Divisor Mask */
36#define PRIMAD_OPBDV_MASK 0x00000F00 /* OPB Clock Divisor Mask */
37#define PRIMAD_EBCDV_MASK 0x0000000F /* EBC Clock Divisor Mask */
38
39#define PERD0_PWMDV_MASK 0xFF000000 /* PWM Divider Mask */
40#define PERD0_SPIDV_MASK 0x000F0000 /* SPI Divider Mask */
41#define PERD0_U0DV_MASK 0x0000FF00 /* UART 0 Divider Mask */
42#define PERD0_U1DV_MASK 0x000000FF /* UART 1 Divider Mask */
43
44static void get_clocks(void)
45{
46 unsigned long sysclk, cpr_plld, cpr_pllc, cpr_primad, plloutb, i;
47 unsigned long pllFwdDiv, pllFwdDivB, pllFbkDiv, pllPlbDiv, pllExtBusDiv;
48 unsigned long pllOpbDiv, freqEBC, freqUART, freqOPB;
49 unsigned long div; /* total divisor udiv * bdiv */
50 unsigned long umin; /* minimum udiv */
51 unsigned short diff; /* smallest diff */
52 unsigned long udiv; /* best udiv */
53 unsigned short idiff; /* current diff */
54 unsigned short ibdiv; /* current bdiv */
55 unsigned long est; /* current estimate */
56 unsigned long baud;
57 void *np;
58
59 /* read the sysclk value from the CPLD */
60 sysclk = (in_8((unsigned char *)0x80000000) == 0xc) ? 66666666 : 33333000;
61
62 /*
63 * Read PLL Mode registers
64 */
65 cpr_plld = CPR0_READ(DCRN_CPR0_PLLD);
66 cpr_pllc = CPR0_READ(DCRN_CPR0_PLLC);
67
68 /*
69 * Determine forward divider A
70 */
71 pllFwdDiv = ((cpr_plld & PLLD_FWDVA_MASK) >> 16);
72
73 /*
74 * Determine forward divider B
75 */
76 pllFwdDivB = ((cpr_plld & PLLD_FWDVB_MASK) >> 8);
77 if (pllFwdDivB == 0)
78 pllFwdDivB = 8;
79
80 /*
81 * Determine FBK_DIV.
82 */
83 pllFbkDiv = ((cpr_plld & PLLD_FBDV_MASK) >> 24);
84 if (pllFbkDiv == 0)
85 pllFbkDiv = 256;
86
87 /*
88 * Read CPR_PRIMAD register
89 */
90 cpr_primad = CPR0_READ(DCRN_CPR0_PRIMAD);
91
92 /*
93 * Determine PLB_DIV.
94 */
95 pllPlbDiv = ((cpr_primad & PRIMAD_PLBDV_MASK) >> 16);
96 if (pllPlbDiv == 0)
97 pllPlbDiv = 16;
98
99 /*
100 * Determine EXTBUS_DIV.
101 */
102 pllExtBusDiv = (cpr_primad & PRIMAD_EBCDV_MASK);
103 if (pllExtBusDiv == 0)
104 pllExtBusDiv = 16;
105
106 /*
107 * Determine OPB_DIV.
108 */
109 pllOpbDiv = ((cpr_primad & PRIMAD_OPBDV_MASK) >> 8);
110 if (pllOpbDiv == 0)
111 pllOpbDiv = 16;
112
113 /* There is a bug in U-Boot that prevents us from using
114 * bd.bi_opbfreq because U-Boot doesn't populate it for
115 * 405EZ. We get to calculate it, yay!
116 */
 117 freqOPB = (sysclk * pllFbkDiv) / pllOpbDiv;
118
119 freqEBC = (sysclk * pllFbkDiv) / pllExtBusDiv;
120
121 plloutb = ((sysclk * ((cpr_pllc & PLLC_SRC_MASK) ?
122 pllFwdDivB : pllFwdDiv) *
123 pllFbkDiv) / pllFwdDivB);
124
125 np = find_node_by_alias("serial0");
126 if (getprop(np, "current-speed", &baud, sizeof(baud)) != sizeof(baud))
127 fatal("no current-speed property\n\r");
128
129 udiv = 256; /* Assume lowest possible serial clk */
130 div = plloutb / (16 * baud); /* total divisor */
131 umin = (plloutb / freqOPB) << 1; /* 2 x OPB divisor */
132 diff = 256; /* highest possible */
133
134 /* i is the test udiv value -- start with the largest
135 * possible (256) to minimize serial clock and constrain
136 * search to umin.
137 */
138 for (i = 256; i > umin; i--) {
139 ibdiv = div / i;
140 est = i * ibdiv;
141 idiff = (est > div) ? (est-div) : (div-est);
142 if (idiff == 0) {
143 udiv = i;
144 break; /* can't do better */
145 } else if (idiff < diff) {
146 udiv = i; /* best so far */
147 diff = idiff; /* update lowest diff*/
148 }
149 }
150 freqUART = plloutb / udiv;
151
152 dt_fixup_cpu_clocks(bd.bi_procfreq, bd.bi_intfreq, bd.bi_plb_busfreq);
153 dt_fixup_clock("/plb/ebc", freqEBC);
154 dt_fixup_clock("/plb/opb", freqOPB);
155 dt_fixup_clock("/plb/opb/serial@ef600300", freqUART);
156 dt_fixup_clock("/plb/opb/serial@ef600400", freqUART);
157}
158
159static void acadia_fixups(void)
160{
161 dt_fixup_memory(bd.bi_memstart, bd.bi_memsize);
162 get_clocks();
163 dt_fixup_mac_address_by_alias("ethernet0", bd.bi_enetaddr);
164}
165
166void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
167 unsigned long r6, unsigned long r7)
168{
169 CUBOOT_INIT();
170 platform_ops.fixups = acadia_fixups;
171 platform_ops.exit = ibm40x_dbcr_reset;
172 fdt_init(_dtb_start);
173 serial_console_init();
174}
diff --git a/arch/powerpc/boot/dts/acadia.dts b/arch/powerpc/boot/dts/acadia.dts
new file mode 100644
index 000000000000..57291f61ffe7
--- /dev/null
+++ b/arch/powerpc/boot/dts/acadia.dts
@@ -0,0 +1,224 @@
1/*
2 * Device Tree Source for AMCC Acadia (405EZ)
3 *
4 * Copyright IBM Corp. 2008
5 *
6 * This file is licensed under the terms of the GNU General Public License
7 * version 2. This program is licensed "as is" without any warranty of any
8 * kind, whether express or implied.
9 */
10
11/dts-v1/;
12
13/ {
14 #address-cells = <1>;
15 #size-cells = <1>;
16 model = "amcc,acadia";
17 compatible = "amcc,acadia";
18 dcr-parent = <&{/cpus/cpu@0}>;
19
20 aliases {
21 ethernet0 = &EMAC0;
22 serial0 = &UART0;
23 serial1 = &UART1;
24 };
25
26 cpus {
27 #address-cells = <1>;
28 #size-cells = <0>;
29
30 cpu@0 {
31 device_type = "cpu";
32 model = "PowerPC,405EZ";
33 reg = <0x0>;
34 clock-frequency = <0>; /* Filled in by wrapper */
35 timebase-frequency = <0>; /* Filled in by wrapper */
36 i-cache-line-size = <32>;
37 d-cache-line-size = <32>;
38 i-cache-size = <16384>;
39 d-cache-size = <16384>;
40 dcr-controller;
41 dcr-access-method = "native";
42 };
43 };
44
45 memory {
46 device_type = "memory";
47 reg = <0x0 0x0>; /* Filled in by wrapper */
48 };
49
50 UIC0: interrupt-controller {
51 compatible = "ibm,uic-405ez", "ibm,uic";
52 interrupt-controller;
53 dcr-reg = <0x0c0 0x009>;
54 cell-index = <0>;
55 #address-cells = <0>;
56 #size-cells = <0>;
57 #interrupt-cells = <2>;
58 };
59
60 plb {
61 compatible = "ibm,plb-405ez", "ibm,plb3";
62 #address-cells = <1>;
63 #size-cells = <1>;
64 ranges;
65 clock-frequency = <0>; /* Filled in by wrapper */
66
67 MAL0: mcmal {
68 compatible = "ibm,mcmal-405ez", "ibm,mcmal";
69 dcr-reg = <0x380 0x62>;
70 num-tx-chans = <1>;
71 num-rx-chans = <1>;
72 interrupt-parent = <&UIC0>;
73 /* 405EZ has only 3 interrupts to the UIC, as
74 * SERR, TXDE, and RXDE are or'd together into
75 * one UIC bit
76 */
77 interrupts = <
78 0x13 0x4 /* TXEOB */
79 0x15 0x4 /* RXEOB */
80 0x12 0x4 /* SERR, TXDE, RXDE */>;
81 };
82
83 POB0: opb {
84 compatible = "ibm,opb-405ez", "ibm,opb";
85 #address-cells = <1>;
86 #size-cells = <1>;
87 ranges;
88 dcr-reg = <0x0a 0x05>;
89 clock-frequency = <0>; /* Filled in by wrapper */
90
91 UART0: serial@ef600300 {
92 device_type = "serial";
93 compatible = "ns16550";
94 reg = <0xef600300 0x8>;
95 virtual-reg = <0xef600300>;
96 clock-frequency = <0>; /* Filled in by wrapper */
97 current-speed = <115200>;
98 interrupt-parent = <&UIC0>;
99 interrupts = <0x5 0x4>;
100 };
101
102 UART1: serial@ef600400 {
103 device_type = "serial";
104 compatible = "ns16550";
105 reg = <0xef600400 0x8>;
106 clock-frequency = <0>; /* Filled in by wrapper */
107 current-speed = <115200>;
108 interrupt-parent = <&UIC0>;
109 interrupts = <0x6 0x4>;
110 };
111
112 IIC: i2c@ef600500 {
113 compatible = "ibm,iic-405ez", "ibm,iic";
114 reg = <0xef600500 0x11>;
115 interrupt-parent = <&UIC0>;
116 interrupts = <0xa 0x4>;
117 };
118
119 GPIO0: gpio@ef600700 {
120 compatible = "ibm,gpio-405ez";
121 reg = <0xef600700 0x20>;
122 };
123
124 GPIO1: gpio@ef600800 {
125 compatible = "ibm,gpio-405ez";
126 reg = <0xef600800 0x20>;
127 };
128
129 EMAC0: ethernet@ef600900 {
130 device_type = "network";
131 compatible = "ibm,emac-405ez", "ibm,emac";
132 interrupt-parent = <&UIC0>;
133 interrupts = <
134 0x10 0x4 /* Ethernet */
135 0x11 0x4 /* Ethernet Wake up */>;
136 local-mac-address = [000000000000]; /* Filled in by wrapper */
137 reg = <0xef600900 0x70>;
138 mal-device = <&MAL0>;
139 mal-tx-channel = <0>;
140 mal-rx-channel = <0>;
141 cell-index = <0>;
142 max-frame-size = <1500>;
143 rx-fifo-size = <4096>;
144 tx-fifo-size = <2048>;
145 phy-mode = "mii";
146 phy-map = <0x0>;
147 };
148
149 CAN0: can@ef601000 {
150 compatible = "amcc,can-405ez";
151 reg = <0xef601000 0x620>;
152 interrupt-parent = <&UIC0>;
153 interrupts = <0x7 0x4>;
154 };
155
156 CAN1: can@ef601800 {
157 compatible = "amcc,can-405ez";
158 reg = <0xef601800 0x620>;
159 interrupt-parent = <&UIC0>;
160 interrupts = <0x8 0x4>;
161 };
162
163 cameleon@ef602000 {
164 compatible = "amcc,cameleon-405ez";
165 reg = <0xef602000 0x800>;
166 interrupt-parent = <&UIC0>;
167 interrupts = <0xb 0x4 0xc 0x4>;
168 };
169
170 ieee1588@ef602800 {
171 compatible = "amcc,ieee1588-405ez";
172 reg = <0xef602800 0x60>;
173 interrupt-parent = <&UIC0>;
174 interrupts = <0x4 0x4>;
 175 /* This thing is a bit weird. It has its own UIC
176 * that it uses to generate snapshot triggers. We
177 * don't really support this device yet, and it needs
178 * work to figure this out.
179 */
180 dcr-reg = <0xe0 0x9>;
181 };
182
183 usb@ef603000 {
184 compatible = "ohci-be";
185 reg = <0xef603000 0x80>;
 186 interrupt-parent = <&UIC0>;
187 interrupts = <0xd 0x4 0xe 0x4>;
188 };
189
190 dac@ef603300 {
191 compatible = "amcc,dac-405ez";
192 reg = <0xef603300 0x40>;
193 interrupt-parent = <&UIC0>;
194 interrupts = <0x18 0x4>;
195 };
196
197 adc@ef603400 {
198 compatible = "amcc,adc-405ez";
199 reg = <0xef603400 0x40>;
200 interrupt-parent = <&UIC0>;
201 interrupts = <0x17 0x4>;
202 };
203
204 spi@ef603500 {
205 compatible = "amcc,spi-405ez";
206 reg = <0xef603500 0x100>;
207 interrupt-parent = <&UIC0>;
208 interrupts = <0x9 0x4>;
209 };
210 };
211
212 EBC0: ebc {
213 compatible = "ibm,ebc-405ez", "ibm,ebc";
214 dcr-reg = <0x12 0x2>;
215 #address-cells = <2>;
216 #size-cells = <1>;
217 clock-frequency = <0>; /* Filled in by wrapper */
218 };
219 };
220
221 chosen {
222 linux,stdout-path = "/plb/opb/serial@ef600300";
223 };
224};
diff --git a/arch/powerpc/boot/dts/hcu4.dts b/arch/powerpc/boot/dts/hcu4.dts
new file mode 100644
index 000000000000..7988598da4c9
--- /dev/null
+++ b/arch/powerpc/boot/dts/hcu4.dts
@@ -0,0 +1,168 @@
1/*
2* Device Tree Source for Netstal Maschinen HCU4
3* based on the IBM Walnut
4*
5* Copyright 2008
6* Niklaus Giger <niklaus.giger@member.fsf.org>
7*
8* Copyright 2007 IBM Corp.
9* Josh Boyer <jwboyer@linux.vnet.ibm.com>
10*
11* This file is licensed under the terms of the GNU General Public
12* License version 2. This program is licensed "as is" without
13* any warranty of any kind, whether express or implied.
14*/
15
16/dts-v1/;
17
18/ {
19 #address-cells = <0x1>;
20 #size-cells = <0x1>;
21 model = "netstal,hcu4";
22 compatible = "netstal,hcu4";
23 dcr-parent = <0x1>;
24
25 aliases {
26 ethernet0 = "/plb/opb/ethernet@ef600800";
27 serial0 = "/plb/opb/serial@ef600300";
28 };
29
30 cpus {
31 #address-cells = <0x1>;
32 #size-cells = <0x0>;
33
34 cpu@0 {
35 device_type = "cpu";
36 model = "PowerPC,405GPr";
37 reg = <0x0>;
38 clock-frequency = <0>; /* Filled in by U-Boot */
39 timebase-frequency = <0x0>; /* Filled in by U-Boot */
40 i-cache-line-size = <0x20>;
41 d-cache-line-size = <0x20>;
42 i-cache-size = <0x4000>;
43 d-cache-size = <0x4000>;
44 dcr-controller;
45 dcr-access-method = "native";
46 linux,phandle = <0x1>;
47 };
48 };
49
50 memory {
51 device_type = "memory";
52 reg = <0x0 0x0>; /* Filled in by U-Boot */
53 };
54
55 UIC0: interrupt-controller {
56 compatible = "ibm,uic";
57 interrupt-controller;
58 cell-index = <0x0>;
59 dcr-reg = <0xc0 0x9>;
60 #address-cells = <0x0>;
61 #size-cells = <0x0>;
62 #interrupt-cells = <0x2>;
63 linux,phandle = <0x2>;
64 };
65
66 plb {
67 compatible = "ibm,plb3";
68 #address-cells = <0x1>;
69 #size-cells = <0x1>;
70 ranges;
71 clock-frequency = <0x0>; /* Filled in by U-Boot */
72
73 SDRAM0: memory-controller {
74 compatible = "ibm,sdram-405gp";
75 dcr-reg = <0x10 0x2>;
76 };
77
78 MAL: mcmal {
79 compatible = "ibm,mcmal-405gp", "ibm,mcmal";
80 dcr-reg = <0x180 0x62>;
81 num-tx-chans = <0x1>;
82 num-rx-chans = <0x1>;
83 interrupt-parent = <0x2>;
84 interrupts = <0xb 0x4 0xc 0x4 0xa 0x4 0xd 0x4 0xe 0x4>;
85 linux,phandle = <0x3>;
86 };
87
88 POB0: opb {
89 compatible = "ibm,opb-405gp", "ibm,opb";
90 #address-cells = <0x1>;
91 #size-cells = <0x1>;
92 ranges = <0xef600000 0xef600000 0xa00000>;
93 dcr-reg = <0xa0 0x5>;
94 clock-frequency = <0x0>; /* Filled in by U-Boot */
95
96 UART0: serial@ef600300 {
97 device_type = "serial";
98 compatible = "ns16550";
99 reg = <0xef600300 0x8>;
100 virtual-reg = <0xef600300>;
101 clock-frequency = <0x0>;/* Filled in by U-Boot */
102 current-speed = <0>; /* Filled in by U-Boot */
103 interrupt-parent = <0x2>;
104 interrupts = <0x0 0x4>;
105 };
106
107 IIC: i2c@ef600500 {
108 compatible = "ibm,iic-405gp", "ibm,iic";
109 reg = <0xef600500 0x11>;
110 interrupt-parent = <0x2>;
111 interrupts = <0x2 0x4>;
112 };
113
114 GPIO: gpio@ef600700 {
115 compatible = "ibm,gpio-405gp";
116 reg = <0xef600700 0x20>;
117 };
118
119 EMAC: ethernet@ef600800 {
120 device_type = "network";
121 compatible = "ibm,emac-405gp", "ibm,emac";
122 interrupt-parent = <0x2>;
123 interrupts = <0xf 0x4 0x9 0x4>;
124 local-mac-address = [00 00 00 00 00 00];
125 reg = <0xef600800 0x70>;
126 mal-device = <0x3>;
127 mal-tx-channel = <0x0>;
128 mal-rx-channel = <0x0>;
129 cell-index = <0x0>;
130 max-frame-size = <0x5dc>;
131 rx-fifo-size = <0x1000>;
132 tx-fifo-size = <0x800>;
133 phy-mode = "rmii";
134 phy-map = <0x1>;
135 };
136 };
137
138 EBC0: ebc {
139 compatible = "ibm,ebc-405gp", "ibm,ebc";
140 dcr-reg = <0x12 0x2>;
141 #address-cells = <0x2>;
142 #size-cells = <0x1>;
143 clock-frequency = <0x0>; /* Filled in by U-Boot */
144
145 sram@0,0 {
146 reg = <0x0 0x0 0x80000>;
147 };
148
149 flash@0,80000 {
150 compatible = "jedec-flash";
151 bank-width = <0x1>;
152 reg = <0x0 0x80000 0x80000>;
153 #address-cells = <0x1>;
154 #size-cells = <0x1>;
155
156 partition@0 {
157 label = "OpenBIOS";
158 reg = <0x0 0x80000>;
159 read-only;
160 };
161 };
162 };
163 };
164
165 chosen {
166 linux,stdout-path = "/plb/opb/serial@ef600300";
167 };
168};
diff --git a/arch/powerpc/boot/dts/mpc8315erdb.dts b/arch/powerpc/boot/dts/mpc8315erdb.dts
index 7449e54c1a90..6b850670de1d 100644
--- a/arch/powerpc/boot/dts/mpc8315erdb.dts
+++ b/arch/powerpc/boot/dts/mpc8315erdb.dts
@@ -121,6 +121,14 @@
121 compatible = "dallas,ds1339"; 121 compatible = "dallas,ds1339";
122 reg = <0x68>; 122 reg = <0x68>;
123 }; 123 };
124
125 mcu_pio: mcu@a {
126 #gpio-cells = <2>;
127 compatible = "fsl,mc9s08qg8-mpc8315erdb",
128 "fsl,mcu-mpc8349emitx";
129 reg = <0x0a>;
130 gpio-controller;
131 };
124 }; 132 };
125 133
126 spi@7000 { 134 spi@7000 {
diff --git a/arch/powerpc/boot/dts/mpc832x_mds.dts b/arch/powerpc/boot/dts/mpc832x_mds.dts
index e4cc1768f241..57c595bf1071 100644
--- a/arch/powerpc/boot/dts/mpc832x_mds.dts
+++ b/arch/powerpc/boot/dts/mpc832x_mds.dts
@@ -60,7 +60,7 @@
60 }; 60 };
61 61
62 bcsr@f8000000 { 62 bcsr@f8000000 {
63 device_type = "board-control"; 63 compatible = "fsl,mpc8323mds-bcsr";
64 reg = <0xf8000000 0x8000>; 64 reg = <0xf8000000 0x8000>;
65 }; 65 };
66 66
diff --git a/arch/powerpc/boot/dts/mpc8349emitx.dts b/arch/powerpc/boot/dts/mpc8349emitx.dts
index 5cedf373a1d8..2c9d54a35bc3 100644
--- a/arch/powerpc/boot/dts/mpc8349emitx.dts
+++ b/arch/powerpc/boot/dts/mpc8349emitx.dts
@@ -83,6 +83,14 @@
83 interrupts = <15 0x8>; 83 interrupts = <15 0x8>;
84 interrupt-parent = <&ipic>; 84 interrupt-parent = <&ipic>;
85 dfsrr; 85 dfsrr;
86
87 rtc@68 {
88 device_type = "rtc";
89 compatible = "dallas,ds1339";
90 reg = <0x68>;
91 interrupts = <18 0x8>;
92 interrupt-parent = <&ipic>;
93 };
86 }; 94 };
87 95
88 spi@7000 { 96 spi@7000 {
@@ -131,6 +139,14 @@
131 interrupt-parent = <&ipic>; 139 interrupt-parent = <&ipic>;
132 interrupts = <71 8>; 140 interrupts = <71 8>;
133 }; 141 };
142
143 mcu_pio: mcu@a {
144 #gpio-cells = <2>;
145 compatible = "fsl,mc9s08qg8-mpc8349emitx",
146 "fsl,mcu-mpc8349emitx";
147 reg = <0x0a>;
148 gpio-controller;
149 };
134 }; 150 };
135 151
136 usb@22000 { 152 usb@22000 {
diff --git a/arch/powerpc/boot/dts/mpc8349emitxgp.dts b/arch/powerpc/boot/dts/mpc8349emitxgp.dts
index 81ae1d3e9440..fa40647ee62e 100644
--- a/arch/powerpc/boot/dts/mpc8349emitxgp.dts
+++ b/arch/powerpc/boot/dts/mpc8349emitxgp.dts
@@ -81,6 +81,14 @@
81 interrupts = <15 0x8>; 81 interrupts = <15 0x8>;
82 interrupt-parent = <&ipic>; 82 interrupt-parent = <&ipic>;
83 dfsrr; 83 dfsrr;
84
85 rtc@68 {
86 device_type = "rtc";
87 compatible = "dallas,ds1339";
88 reg = <0x68>;
89 interrupts = <18 0x8>;
90 interrupt-parent = <&ipic>;
91 };
84 }; 92 };
85 93
86 spi@7000 { 94 spi@7000 {
diff --git a/arch/powerpc/boot/dts/mpc834x_mds.dts b/arch/powerpc/boot/dts/mpc834x_mds.dts
index 04bfde3ea605..c986c541e9bb 100644
--- a/arch/powerpc/boot/dts/mpc834x_mds.dts
+++ b/arch/powerpc/boot/dts/mpc834x_mds.dts
@@ -49,7 +49,7 @@
49 }; 49 };
50 50
51 bcsr@e2400000 { 51 bcsr@e2400000 {
52 device_type = "board-control"; 52 compatible = "fsl,mpc8349mds-bcsr";
53 reg = <0xe2400000 0x8000>; 53 reg = <0xe2400000 0x8000>;
54 }; 54 };
55 55
diff --git a/arch/powerpc/boot/dts/mpc836x_mds.dts b/arch/powerpc/boot/dts/mpc836x_mds.dts
index 66a12d2631fb..14534d04e4db 100644
--- a/arch/powerpc/boot/dts/mpc836x_mds.dts
+++ b/arch/powerpc/boot/dts/mpc836x_mds.dts
@@ -69,7 +69,7 @@
69 }; 69 };
70 70
71 bcsr@1,0 { 71 bcsr@1,0 {
72 device_type = "board-control"; 72 compatible = "fsl,mpc8360mds-bcsr";
73 reg = <1 0 0x8000>; 73 reg = <1 0 0x8000>;
74 }; 74 };
75 }; 75 };
diff --git a/arch/powerpc/boot/dts/mpc8377_rdb.dts b/arch/powerpc/boot/dts/mpc8377_rdb.dts
index 53191ba67aaa..435ef3dd022d 100644
--- a/arch/powerpc/boot/dts/mpc8377_rdb.dts
+++ b/arch/powerpc/boot/dts/mpc8377_rdb.dts
@@ -121,6 +121,14 @@
121 compatible = "dallas,ds1339"; 121 compatible = "dallas,ds1339";
122 reg = <0x68>; 122 reg = <0x68>;
123 }; 123 };
124
125 mcu_pio: mcu@a {
126 #gpio-cells = <2>;
127 compatible = "fsl,mc9s08qg8-mpc8377erdb",
128 "fsl,mcu-mpc8349emitx";
129 reg = <0x0a>;
130 gpio-controller;
131 };
124 }; 132 };
125 133
126 i2c@3100 { 134 i2c@3100 {
diff --git a/arch/powerpc/boot/dts/mpc8378_rdb.dts b/arch/powerpc/boot/dts/mpc8378_rdb.dts
index 4a09153d160c..b11e68f56a06 100644
--- a/arch/powerpc/boot/dts/mpc8378_rdb.dts
+++ b/arch/powerpc/boot/dts/mpc8378_rdb.dts
@@ -121,6 +121,14 @@
121 compatible = "dallas,ds1339"; 121 compatible = "dallas,ds1339";
122 reg = <0x68>; 122 reg = <0x68>;
123 }; 123 };
124
125 mcu_pio: mcu@a {
126 #gpio-cells = <2>;
127 compatible = "fsl,mc9s08qg8-mpc8378erdb",
128 "fsl,mcu-mpc8349emitx";
129 reg = <0x0a>;
130 gpio-controller;
131 };
124 }; 132 };
125 133
126 i2c@3100 { 134 i2c@3100 {
diff --git a/arch/powerpc/boot/dts/mpc8379_rdb.dts b/arch/powerpc/boot/dts/mpc8379_rdb.dts
index bbd884ac9dc0..337af6ea26d3 100644
--- a/arch/powerpc/boot/dts/mpc8379_rdb.dts
+++ b/arch/powerpc/boot/dts/mpc8379_rdb.dts
@@ -121,6 +121,14 @@
121 compatible = "dallas,ds1339"; 121 compatible = "dallas,ds1339";
122 reg = <0x68>; 122 reg = <0x68>;
123 }; 123 };
124
125 mcu_pio: mcu@a {
126 #gpio-cells = <2>;
127 compatible = "fsl,mc9s08qg8-mpc8379erdb",
128 "fsl,mcu-mpc8349emitx";
129 reg = <0x0a>;
130 gpio-controller;
131 };
124 }; 132 };
125 133
126 i2c@3100 { 134 i2c@3100 {
diff --git a/arch/powerpc/boot/dts/mpc8536ds.dts b/arch/powerpc/boot/dts/mpc8536ds.dts
index 93fdd99901b6..35db1e5440c7 100644
--- a/arch/powerpc/boot/dts/mpc8536ds.dts
+++ b/arch/powerpc/boot/dts/mpc8536ds.dts
@@ -109,7 +109,7 @@
109 reg = <0x0 0x80>; 109 reg = <0x0 0x80>;
110 cell-index = <0>; 110 cell-index = <0>;
111 interrupt-parent = <&mpic>; 111 interrupt-parent = <&mpic>;
112 interrupts = <14 0x2>; 112 interrupts = <20 2>;
113 }; 113 };
114 dma-channel@80 { 114 dma-channel@80 {
115 compatible = "fsl,mpc8536-dma-channel", 115 compatible = "fsl,mpc8536-dma-channel",
@@ -117,7 +117,7 @@
117 reg = <0x80 0x80>; 117 reg = <0x80 0x80>;
118 cell-index = <1>; 118 cell-index = <1>;
119 interrupt-parent = <&mpic>; 119 interrupt-parent = <&mpic>;
120 interrupts = <15 0x2>; 120 interrupts = <21 2>;
121 }; 121 };
122 dma-channel@100 { 122 dma-channel@100 {
123 compatible = "fsl,mpc8536-dma-channel", 123 compatible = "fsl,mpc8536-dma-channel",
@@ -125,7 +125,7 @@
125 reg = <0x100 0x80>; 125 reg = <0x100 0x80>;
126 cell-index = <2>; 126 cell-index = <2>;
127 interrupt-parent = <&mpic>; 127 interrupt-parent = <&mpic>;
128 interrupts = <16 0x2>; 128 interrupts = <22 2>;
129 }; 129 };
130 dma-channel@180 { 130 dma-channel@180 {
131 compatible = "fsl,mpc8536-dma-channel", 131 compatible = "fsl,mpc8536-dma-channel",
@@ -133,7 +133,7 @@
133 reg = <0x180 0x80>; 133 reg = <0x180 0x80>;
134 cell-index = <3>; 134 cell-index = <3>;
135 interrupt-parent = <&mpic>; 135 interrupt-parent = <&mpic>;
136 interrupts = <17 0x2>; 136 interrupts = <23 2>;
137 }; 137 };
138 }; 138 };
139 139
@@ -180,7 +180,7 @@
180 enet0: ethernet@24000 { 180 enet0: ethernet@24000 {
181 cell-index = <0>; 181 cell-index = <0>;
182 device_type = "network"; 182 device_type = "network";
183 model = "TSEC"; 183 model = "eTSEC";
184 compatible = "gianfar"; 184 compatible = "gianfar";
185 reg = <0x24000 0x1000>; 185 reg = <0x24000 0x1000>;
186 local-mac-address = [ 00 00 00 00 00 00 ]; 186 local-mac-address = [ 00 00 00 00 00 00 ];
@@ -193,7 +193,7 @@
193 enet1: ethernet@26000 { 193 enet1: ethernet@26000 {
194 cell-index = <1>; 194 cell-index = <1>;
195 device_type = "network"; 195 device_type = "network";
196 model = "TSEC"; 196 model = "eTSEC";
197 compatible = "gianfar"; 197 compatible = "gianfar";
198 reg = <0x26000 0x1000>; 198 reg = <0x26000 0x1000>;
199 local-mac-address = [ 00 00 00 00 00 00 ]; 199 local-mac-address = [ 00 00 00 00 00 00 ];
diff --git a/arch/powerpc/boot/dts/mpc8568mds.dts b/arch/powerpc/boot/dts/mpc8568mds.dts
index a15f10343f53..c80158f7741d 100644
--- a/arch/powerpc/boot/dts/mpc8568mds.dts
+++ b/arch/powerpc/boot/dts/mpc8568mds.dts
@@ -52,7 +52,7 @@
52 }; 52 };
53 53
54 bcsr@f8000000 { 54 bcsr@f8000000 {
55 device_type = "board-control"; 55 compatible = "fsl,mpc8568mds-bcsr";
56 reg = <0xf8000000 0x8000>; 56 reg = <0xf8000000 0x8000>;
57 }; 57 };
58 58
diff --git a/arch/powerpc/boot/dts/mpc8572ds.dts b/arch/powerpc/boot/dts/mpc8572ds.dts
index e124dd18fb5a..cadd4652a695 100644
--- a/arch/powerpc/boot/dts/mpc8572ds.dts
+++ b/arch/powerpc/boot/dts/mpc8572ds.dts
@@ -13,8 +13,8 @@
13/ { 13/ {
14 model = "fsl,MPC8572DS"; 14 model = "fsl,MPC8572DS";
15 compatible = "fsl,MPC8572DS"; 15 compatible = "fsl,MPC8572DS";
16 #address-cells = <1>; 16 #address-cells = <2>;
17 #size-cells = <1>; 17 #size-cells = <2>;
18 18
19 aliases { 19 aliases {
20 ethernet0 = &enet0; 20 ethernet0 = &enet0;
@@ -61,7 +61,6 @@
61 61
62 memory { 62 memory {
63 device_type = "memory"; 63 device_type = "memory";
64 reg = <0x0 0x0>; // Filled by U-Boot
65 }; 64 };
66 65
67 soc8572@ffe00000 { 66 soc8572@ffe00000 {
@@ -69,8 +68,8 @@
69 #size-cells = <1>; 68 #size-cells = <1>;
70 device_type = "soc"; 69 device_type = "soc";
71 compatible = "simple-bus"; 70 compatible = "simple-bus";
72 ranges = <0x0 0xffe00000 0x100000>; 71 ranges = <0x0 0 0xffe00000 0x100000>;
73 reg = <0xffe00000 0x1000>; // CCSRBAR & soc regs, remove once parse code for immrbase fixed 72 reg = <0 0xffe00000 0 0x1000>; // CCSRBAR & soc regs, remove once parse code for immrbase fixed
74 bus-frequency = <0>; // Filled out by uboot. 73 bus-frequency = <0>; // Filled out by uboot.
75 74
76 memory-controller@2000 { 75 memory-controller@2000 {
@@ -351,10 +350,10 @@
351 #interrupt-cells = <1>; 350 #interrupt-cells = <1>;
352 #size-cells = <2>; 351 #size-cells = <2>;
353 #address-cells = <3>; 352 #address-cells = <3>;
354 reg = <0xffe08000 0x1000>; 353 reg = <0 0xffe08000 0 0x1000>;
355 bus-range = <0 255>; 354 bus-range = <0 255>;
356 ranges = <0x2000000 0x0 0x80000000 0x80000000 0x0 0x20000000 355 ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000
357 0x1000000 0x0 0x0 0xffc00000 0x0 0x10000>; 356 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x00010000>;
358 clock-frequency = <33333333>; 357 clock-frequency = <33333333>;
359 interrupt-parent = <&mpic>; 358 interrupt-parent = <&mpic>;
360 interrupts = <24 2>; 359 interrupts = <24 2>;
@@ -561,10 +560,10 @@
561 #interrupt-cells = <1>; 560 #interrupt-cells = <1>;
562 #size-cells = <2>; 561 #size-cells = <2>;
563 #address-cells = <3>; 562 #address-cells = <3>;
564 reg = <0xffe09000 0x1000>; 563 reg = <0 0xffe09000 0 0x1000>;
565 bus-range = <0 255>; 564 bus-range = <0 255>;
566 ranges = <0x2000000 0x0 0xa0000000 0xa0000000 0x0 0x20000000 565 ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000
567 0x1000000 0x0 0x0 0xffc10000 0x0 0x10000>; 566 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x00010000>;
568 clock-frequency = <33333333>; 567 clock-frequency = <33333333>;
569 interrupt-parent = <&mpic>; 568 interrupt-parent = <&mpic>;
570 interrupts = <26 2>; 569 interrupts = <26 2>;
@@ -598,10 +597,10 @@
598 #interrupt-cells = <1>; 597 #interrupt-cells = <1>;
599 #size-cells = <2>; 598 #size-cells = <2>;
600 #address-cells = <3>; 599 #address-cells = <3>;
601 reg = <0xffe0a000 0x1000>; 600 reg = <0 0xffe0a000 0 0x1000>;
602 bus-range = <0 255>; 601 bus-range = <0 255>;
603 ranges = <0x2000000 0x0 0xc0000000 0xc0000000 0x0 0x20000000 602 ranges = <0x2000000 0x0 0xc0000000 0 0xc0000000 0x0 0x20000000
604 0x1000000 0x0 0x0 0xffc20000 0x0 0x10000>; 603 0x1000000 0x0 0x00000000 0 0xffc20000 0x0 0x00010000>;
605 clock-frequency = <33333333>; 604 clock-frequency = <33333333>;
606 interrupt-parent = <&mpic>; 605 interrupt-parent = <&mpic>;
607 interrupts = <27 2>; 606 interrupts = <27 2>;
diff --git a/arch/powerpc/boot/libfdt-wrapper.c b/arch/powerpc/boot/libfdt-wrapper.c
index c541fd8a95d4..9276327bc2bb 100644
--- a/arch/powerpc/boot/libfdt-wrapper.c
+++ b/arch/powerpc/boot/libfdt-wrapper.c
@@ -105,6 +105,11 @@ static int fdt_wrapper_setprop(const void *devp, const char *name,
105 return check_err(rc); 105 return check_err(rc);
106} 106}
107 107
108static int fdt_wrapper_del_node(const void *devp)
109{
110 return fdt_del_node(fdt, devp_offset(devp));
111}
112
108static void *fdt_wrapper_get_parent(const void *devp) 113static void *fdt_wrapper_get_parent(const void *devp)
109{ 114{
110 return offset_devp(fdt_parent_offset(fdt, devp_offset(devp))); 115 return offset_devp(fdt_parent_offset(fdt, devp_offset(devp)));
@@ -165,6 +170,7 @@ static unsigned long fdt_wrapper_finalize(void)
165void fdt_init(void *blob) 170void fdt_init(void *blob)
166{ 171{
167 int err; 172 int err;
173 int bufsize;
168 174
169 dt_ops.finddevice = fdt_wrapper_finddevice; 175 dt_ops.finddevice = fdt_wrapper_finddevice;
170 dt_ops.getprop = fdt_wrapper_getprop; 176 dt_ops.getprop = fdt_wrapper_getprop;
@@ -173,21 +179,21 @@ void fdt_init(void *blob)
173 dt_ops.create_node = fdt_wrapper_create_node; 179 dt_ops.create_node = fdt_wrapper_create_node;
174 dt_ops.find_node_by_prop_value = fdt_wrapper_find_node_by_prop_value; 180 dt_ops.find_node_by_prop_value = fdt_wrapper_find_node_by_prop_value;
175 dt_ops.find_node_by_compatible = fdt_wrapper_find_node_by_compatible; 181 dt_ops.find_node_by_compatible = fdt_wrapper_find_node_by_compatible;
182 dt_ops.del_node = fdt_wrapper_del_node;
176 dt_ops.get_path = fdt_wrapper_get_path; 183 dt_ops.get_path = fdt_wrapper_get_path;
177 dt_ops.finalize = fdt_wrapper_finalize; 184 dt_ops.finalize = fdt_wrapper_finalize;
178 185
179 /* Make sure the dt blob is the right version and so forth */ 186 /* Make sure the dt blob is the right version and so forth */
180 fdt = blob; 187 fdt = blob;
181 err = fdt_open_into(fdt, fdt, fdt_totalsize(blob)); 188 bufsize = fdt_totalsize(fdt) + 4;
182 if (err == -FDT_ERR_NOSPACE) { 189 buf = malloc(bufsize);
183 int bufsize = fdt_totalsize(fdt) + 4; 190 if(!buf)
184 buf = malloc(bufsize); 191 fatal("malloc failed. can't relocate the device tree\n\r");
185 err = fdt_open_into(fdt, buf, bufsize); 192
186 } 193 err = fdt_open_into(fdt, buf, bufsize);
187 194
188 if (err != 0) 195 if (err != 0)
189 fatal("fdt_init(): %s\n\r", fdt_strerror(err)); 196 fatal("fdt_init(): %s\n\r", fdt_strerror(err));
190 197
191 if (buf) 198 fdt = buf;
192 fdt = buf;
193} 199}
diff --git a/arch/powerpc/boot/main.c b/arch/powerpc/boot/main.c
index 9e7f3ddd9913..ae32801ebd69 100644
--- a/arch/powerpc/boot/main.c
+++ b/arch/powerpc/boot/main.c
@@ -56,9 +56,19 @@ static struct addr_range prep_kernel(void)
56 if (platform_ops.vmlinux_alloc) { 56 if (platform_ops.vmlinux_alloc) {
57 addr = platform_ops.vmlinux_alloc(ei.memsize); 57 addr = platform_ops.vmlinux_alloc(ei.memsize);
58 } else { 58 } else {
59 if ((unsigned long)_start < ei.memsize) 59 /*
60 * Check if the kernel image (without bss) would overwrite the
61 * bootwrapper. The device tree has been moved in fdt_init()
62 * to an area allocated with malloc() (somewhere past _end).
63 */
64 if ((unsigned long)_start < ei.loadsize)
60 fatal("Insufficient memory for kernel at address 0!" 65 fatal("Insufficient memory for kernel at address 0!"
61 " (_start=%p)\n\r", _start); 66 " (_start=%p, uncomressed size=%08x)\n\r",
67 _start, ei.loadsize);
68
69 if ((unsigned long)_end < ei.memsize)
70 fatal("The final kernel image would overwrite the "
71 "device tree\n\r");
62 } 72 }
63 73
64 /* Finally, gunzip the kernel */ 74 /* Finally, gunzip the kernel */
diff --git a/arch/powerpc/boot/ops.h b/arch/powerpc/boot/ops.h
index 321e2f5afe71..b3218ce451bb 100644
--- a/arch/powerpc/boot/ops.h
+++ b/arch/powerpc/boot/ops.h
@@ -40,6 +40,7 @@ struct dt_ops {
40 const int buflen); 40 const int buflen);
41 int (*setprop)(const void *phandle, const char *name, 41 int (*setprop)(const void *phandle, const char *name,
42 const void *buf, const int buflen); 42 const void *buf, const int buflen);
43 int (*del_node)(const void *phandle);
43 void *(*get_parent)(const void *phandle); 44 void *(*get_parent)(const void *phandle);
44 /* The node must not already exist. */ 45 /* The node must not already exist. */
45 void *(*create_node)(const void *parent, const char *name); 46 void *(*create_node)(const void *parent, const char *name);
@@ -126,6 +127,11 @@ static inline int setprop_str(void *devp, const char *name, const char *buf)
126 return -1; 127 return -1;
127} 128}
128 129
130static inline int del_node(const void *devp)
131{
132 return dt_ops.del_node ? dt_ops.del_node(devp) : -1;
133}
134
129static inline void *get_parent(const char *devp) 135static inline void *get_parent(const char *devp)
130{ 136{
131 return dt_ops.get_parent ? dt_ops.get_parent(devp) : NULL; 137 return dt_ops.get_parent ? dt_ops.get_parent(devp) : NULL;
diff --git a/arch/powerpc/boot/string.S b/arch/powerpc/boot/string.S
index 643e4cb2f11d..acc9428f2789 100644
--- a/arch/powerpc/boot/string.S
+++ b/arch/powerpc/boot/string.S
@@ -235,7 +235,7 @@ memchr:
235 .globl memcmp 235 .globl memcmp
236memcmp: 236memcmp:
237 cmpwi 0,r5,0 237 cmpwi 0,r5,0
238 blelr 238 ble 2f
239 mtctr r5 239 mtctr r5
240 addi r6,r3,-1 240 addi r6,r3,-1
241 addi r4,r4,-1 241 addi r4,r4,-1
@@ -244,6 +244,8 @@ memcmp:
244 subf. r3,r0,r3 244 subf. r3,r0,r3
245 bdnzt 2,1b 245 bdnzt 2,1b
246 blr 246 blr
2472: li r3,0
248 blr
247 249
248 250
249/* 251/*
diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper
index ee0dc41d7c56..f39073511a49 100755
--- a/arch/powerpc/boot/wrapper
+++ b/arch/powerpc/boot/wrapper
@@ -306,11 +306,14 @@ fi
306 306
307# post-processing needed for some platforms 307# post-processing needed for some platforms
308case "$platform" in 308case "$platform" in
309pseries|chrp) 309pseries)
310 ${CROSS}objcopy -O binary -j .fakeelf "$kernel" "$ofile".rpanote 310 ${CROSS}objcopy -O binary -j .fakeelf "$kernel" "$ofile".rpanote
311 $objbin/addnote "$ofile" "$ofile".rpanote 311 $objbin/addnote "$ofile" "$ofile".rpanote
312 rm -r "$ofile".rpanote 312 rm -r "$ofile".rpanote
313 ;; 313 ;;
314chrp)
315 $objbin/addnote -r c00000 "$ofile"
316 ;;
314coff) 317coff)
315 ${CROSS}objcopy -O aixcoff-rs6000 --set-start "$entry" "$ofile" 318 ${CROSS}objcopy -O aixcoff-rs6000 --set-start "$entry" "$ofile"
316 $objbin/hack-coff "$ofile" 319 $objbin/hack-coff "$ofile"
diff --git a/arch/powerpc/configs/40x/acadia_defconfig b/arch/powerpc/configs/40x/acadia_defconfig
new file mode 100644
index 000000000000..39bd9eb453f0
--- /dev/null
+++ b/arch/powerpc/configs/40x/acadia_defconfig
@@ -0,0 +1,921 @@
1#
2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.27-rc5
4# Mon Oct 13 13:47:16 2008
5#
6# CONFIG_PPC64 is not set
7
8#
9# Processor support
10#
11# CONFIG_6xx is not set
12# CONFIG_PPC_85xx is not set
13# CONFIG_PPC_8xx is not set
14CONFIG_40x=y
15# CONFIG_44x is not set
16# CONFIG_E200 is not set
17CONFIG_4xx=y
18# CONFIG_PPC_MM_SLICES is not set
19CONFIG_NOT_COHERENT_CACHE=y
20CONFIG_PPC32=y
21CONFIG_WORD_SIZE=32
22CONFIG_PPC_MERGE=y
23CONFIG_MMU=y
24CONFIG_GENERIC_CMOS_UPDATE=y
25CONFIG_GENERIC_TIME=y
26CONFIG_GENERIC_TIME_VSYSCALL=y
27CONFIG_GENERIC_CLOCKEVENTS=y
28CONFIG_GENERIC_HARDIRQS=y
29# CONFIG_HAVE_SETUP_PER_CPU_AREA is not set
30CONFIG_IRQ_PER_CPU=y
31CONFIG_STACKTRACE_SUPPORT=y
32CONFIG_HAVE_LATENCYTOP_SUPPORT=y
33CONFIG_LOCKDEP_SUPPORT=y
34CONFIG_RWSEM_XCHGADD_ALGORITHM=y
35CONFIG_ARCH_HAS_ILOG2_U32=y
36CONFIG_GENERIC_HWEIGHT=y
37CONFIG_GENERIC_CALIBRATE_DELAY=y
38CONFIG_GENERIC_FIND_NEXT_BIT=y
39# CONFIG_ARCH_NO_VIRT_TO_BUS is not set
40CONFIG_PPC=y
41CONFIG_EARLY_PRINTK=y
42CONFIG_GENERIC_NVRAM=y
43CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
44CONFIG_ARCH_MAY_HAVE_PC_FDC=y
45CONFIG_PPC_OF=y
46CONFIG_OF=y
47CONFIG_PPC_UDBG_16550=y
48# CONFIG_GENERIC_TBSYNC is not set
49CONFIG_AUDIT_ARCH=y
50CONFIG_GENERIC_BUG=y
51# CONFIG_DEFAULT_UIMAGE is not set
52CONFIG_PPC_DCR_NATIVE=y
53# CONFIG_PPC_DCR_MMIO is not set
54CONFIG_PPC_DCR=y
55CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
56
57#
58# General setup
59#
60CONFIG_EXPERIMENTAL=y
61CONFIG_BROKEN_ON_SMP=y
62CONFIG_INIT_ENV_ARG_LIMIT=32
63CONFIG_LOCALVERSION=""
64CONFIG_LOCALVERSION_AUTO=y
65CONFIG_SWAP=y
66CONFIG_SYSVIPC=y
67CONFIG_SYSVIPC_SYSCTL=y
68CONFIG_POSIX_MQUEUE=y
69# CONFIG_BSD_PROCESS_ACCT is not set
70# CONFIG_TASKSTATS is not set
71# CONFIG_AUDIT is not set
72# CONFIG_IKCONFIG is not set
73CONFIG_LOG_BUF_SHIFT=14
74# CONFIG_CGROUPS is not set
75CONFIG_GROUP_SCHED=y
76# CONFIG_FAIR_GROUP_SCHED is not set
77# CONFIG_RT_GROUP_SCHED is not set
78CONFIG_USER_SCHED=y
79# CONFIG_CGROUP_SCHED is not set
80CONFIG_SYSFS_DEPRECATED=y
81CONFIG_SYSFS_DEPRECATED_V2=y
82# CONFIG_RELAY is not set
83# CONFIG_NAMESPACES is not set
84CONFIG_BLK_DEV_INITRD=y
85CONFIG_INITRAMFS_SOURCE=""
86# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
87CONFIG_SYSCTL=y
88CONFIG_EMBEDDED=y
89CONFIG_SYSCTL_SYSCALL=y
90CONFIG_KALLSYMS=y
91CONFIG_KALLSYMS_ALL=y
92CONFIG_KALLSYMS_EXTRA_PASS=y
93CONFIG_HOTPLUG=y
94CONFIG_PRINTK=y
95CONFIG_BUG=y
96CONFIG_ELF_CORE=y
97CONFIG_COMPAT_BRK=y
98CONFIG_BASE_FULL=y
99CONFIG_FUTEX=y
100CONFIG_ANON_INODES=y
101CONFIG_EPOLL=y
102CONFIG_SIGNALFD=y
103CONFIG_TIMERFD=y
104CONFIG_EVENTFD=y
105CONFIG_SHMEM=y
106CONFIG_VM_EVENT_COUNTERS=y
107CONFIG_SLUB_DEBUG=y
108# CONFIG_SLAB is not set
109CONFIG_SLUB=y
110# CONFIG_SLOB is not set
111# CONFIG_PROFILING is not set
112# CONFIG_MARKERS is not set
113CONFIG_HAVE_OPROFILE=y
114# CONFIG_KPROBES is not set
115CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
116CONFIG_HAVE_IOREMAP_PROT=y
117CONFIG_HAVE_KPROBES=y
118CONFIG_HAVE_KRETPROBES=y
119CONFIG_HAVE_ARCH_TRACEHOOK=y
120# CONFIG_HAVE_DMA_ATTRS is not set
121# CONFIG_USE_GENERIC_SMP_HELPERS is not set
122# CONFIG_HAVE_CLK is not set
123CONFIG_PROC_PAGE_MONITOR=y
124# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
125CONFIG_SLABINFO=y
126CONFIG_RT_MUTEXES=y
127# CONFIG_TINY_SHMEM is not set
128CONFIG_BASE_SMALL=0
129CONFIG_MODULES=y
130# CONFIG_MODULE_FORCE_LOAD is not set
131CONFIG_MODULE_UNLOAD=y
132# CONFIG_MODULE_FORCE_UNLOAD is not set
133# CONFIG_MODVERSIONS is not set
134# CONFIG_MODULE_SRCVERSION_ALL is not set
135CONFIG_KMOD=y
136CONFIG_BLOCK=y
137CONFIG_LBD=y
138# CONFIG_BLK_DEV_IO_TRACE is not set
139# CONFIG_LSF is not set
140# CONFIG_BLK_DEV_BSG is not set
141# CONFIG_BLK_DEV_INTEGRITY is not set
142
143#
144# IO Schedulers
145#
146CONFIG_IOSCHED_NOOP=y
147CONFIG_IOSCHED_AS=y
148CONFIG_IOSCHED_DEADLINE=y
149CONFIG_IOSCHED_CFQ=y
150CONFIG_DEFAULT_AS=y
151# CONFIG_DEFAULT_DEADLINE is not set
152# CONFIG_DEFAULT_CFQ is not set
153# CONFIG_DEFAULT_NOOP is not set
154CONFIG_DEFAULT_IOSCHED="anticipatory"
155CONFIG_CLASSIC_RCU=y
156# CONFIG_PPC4xx_PCI_EXPRESS is not set
157
158#
159# Platform support
160#
161# CONFIG_PPC_CELL is not set
162# CONFIG_PPC_CELL_NATIVE is not set
163# CONFIG_PQ2ADS is not set
164CONFIG_ACADIA=y
165# CONFIG_EP405 is not set
166# CONFIG_KILAUEA is not set
167# CONFIG_MAKALU is not set
168# CONFIG_WALNUT is not set
169# CONFIG_XILINX_VIRTEX_GENERIC_BOARD is not set
170CONFIG_PPC40x_SIMPLE=y
171CONFIG_405EZ=y
172# CONFIG_IPIC is not set
173# CONFIG_MPIC is not set
174# CONFIG_MPIC_WEIRD is not set
175# CONFIG_PPC_I8259 is not set
176# CONFIG_PPC_RTAS is not set
177# CONFIG_MMIO_NVRAM is not set
178# CONFIG_PPC_MPC106 is not set
179# CONFIG_PPC_970_NAP is not set
180# CONFIG_PPC_INDIRECT_IO is not set
181# CONFIG_GENERIC_IOMAP is not set
182# CONFIG_CPU_FREQ is not set
183# CONFIG_FSL_ULI1575 is not set
184
185#
186# Kernel options
187#
188# CONFIG_HIGHMEM is not set
189# CONFIG_TICK_ONESHOT is not set
190# CONFIG_NO_HZ is not set
191# CONFIG_HIGH_RES_TIMERS is not set
192CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
193# CONFIG_HZ_100 is not set
194CONFIG_HZ_250=y
195# CONFIG_HZ_300 is not set
196# CONFIG_HZ_1000 is not set
197CONFIG_HZ=250
198# CONFIG_SCHED_HRTICK is not set
199CONFIG_PREEMPT_NONE=y
200# CONFIG_PREEMPT_VOLUNTARY is not set
201# CONFIG_PREEMPT is not set
202CONFIG_BINFMT_ELF=y
203# CONFIG_BINFMT_MISC is not set
204# CONFIG_MATH_EMULATION is not set
205# CONFIG_IOMMU_HELPER is not set
206CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
207CONFIG_ARCH_HAS_WALK_MEMORY=y
208CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
209CONFIG_ARCH_FLATMEM_ENABLE=y
210CONFIG_ARCH_POPULATES_NODE_MAP=y
211CONFIG_SELECT_MEMORY_MODEL=y
212CONFIG_FLATMEM_MANUAL=y
213# CONFIG_DISCONTIGMEM_MANUAL is not set
214# CONFIG_SPARSEMEM_MANUAL is not set
215CONFIG_FLATMEM=y
216CONFIG_FLAT_NODE_MEM_MAP=y
217# CONFIG_SPARSEMEM_STATIC is not set
218# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
219CONFIG_PAGEFLAGS_EXTENDED=y
220CONFIG_SPLIT_PTLOCK_CPUS=4
221CONFIG_MIGRATION=y
222# CONFIG_RESOURCES_64BIT is not set
223CONFIG_ZONE_DMA_FLAG=1
224CONFIG_BOUNCE=y
225CONFIG_VIRT_TO_BUS=y
226CONFIG_FORCE_MAX_ZONEORDER=11
227CONFIG_PROC_DEVICETREE=y
228# CONFIG_CMDLINE_BOOL is not set
229CONFIG_EXTRA_TARGETS=""
230# CONFIG_PM is not set
231CONFIG_SECCOMP=y
232CONFIG_ISA_DMA_API=y
233
234#
235# Bus options
236#
237CONFIG_ZONE_DMA=y
238CONFIG_PPC_INDIRECT_PCI=y
239CONFIG_4xx_SOC=y
240CONFIG_PPC_PCI_CHOICE=y
241CONFIG_PCI=y
242CONFIG_PCI_DOMAINS=y
243CONFIG_PCI_SYSCALL=y
244# CONFIG_PCIEPORTBUS is not set
245CONFIG_ARCH_SUPPORTS_MSI=y
246# CONFIG_PCI_MSI is not set
247CONFIG_PCI_LEGACY=y
248# CONFIG_PCI_DEBUG is not set
249# CONFIG_PCCARD is not set
250# CONFIG_HOTPLUG_PCI is not set
251# CONFIG_HAS_RAPIDIO is not set
252
253#
254# Advanced setup
255#
256# CONFIG_ADVANCED_OPTIONS is not set
257
258#
259# Default settings for advanced configuration options are used
260#
261CONFIG_LOWMEM_SIZE=0x30000000
262CONFIG_PAGE_OFFSET=0xc0000000
263CONFIG_KERNEL_START=0xc0000000
264CONFIG_PHYSICAL_START=0x00000000
265CONFIG_TASK_SIZE=0xc0000000
266CONFIG_CONSISTENT_START=0xff100000
267CONFIG_CONSISTENT_SIZE=0x00200000
268CONFIG_NET=y
269
270#
271# Networking options
272#
273CONFIG_PACKET=y
274# CONFIG_PACKET_MMAP is not set
275CONFIG_UNIX=y
276# CONFIG_NET_KEY is not set
277CONFIG_INET=y
278# CONFIG_IP_MULTICAST is not set
279# CONFIG_IP_ADVANCED_ROUTER is not set
280CONFIG_IP_FIB_HASH=y
281CONFIG_IP_PNP=y
282CONFIG_IP_PNP_DHCP=y
283CONFIG_IP_PNP_BOOTP=y
284# CONFIG_IP_PNP_RARP is not set
285# CONFIG_NET_IPIP is not set
286# CONFIG_NET_IPGRE is not set
287# CONFIG_ARPD is not set
288# CONFIG_SYN_COOKIES is not set
289# CONFIG_INET_AH is not set
290# CONFIG_INET_ESP is not set
291# CONFIG_INET_IPCOMP is not set
292# CONFIG_INET_XFRM_TUNNEL is not set
293# CONFIG_INET_TUNNEL is not set
294# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
295# CONFIG_INET_XFRM_MODE_TUNNEL is not set
296# CONFIG_INET_XFRM_MODE_BEET is not set
297# CONFIG_INET_LRO is not set
298CONFIG_INET_DIAG=y
299CONFIG_INET_TCP_DIAG=y
300# CONFIG_TCP_CONG_ADVANCED is not set
301CONFIG_TCP_CONG_CUBIC=y
302CONFIG_DEFAULT_TCP_CONG="cubic"
303# CONFIG_TCP_MD5SIG is not set
304# CONFIG_IPV6 is not set
305# CONFIG_NETWORK_SECMARK is not set
306# CONFIG_NETFILTER is not set
307# CONFIG_IP_DCCP is not set
308# CONFIG_IP_SCTP is not set
309# CONFIG_TIPC is not set
310# CONFIG_ATM is not set
311# CONFIG_BRIDGE is not set
312# CONFIG_VLAN_8021Q is not set
313# CONFIG_DECNET is not set
314# CONFIG_LLC2 is not set
315# CONFIG_IPX is not set
316# CONFIG_ATALK is not set
317# CONFIG_X25 is not set
318# CONFIG_LAPB is not set
319# CONFIG_ECONET is not set
320# CONFIG_WAN_ROUTER is not set
321# CONFIG_NET_SCHED is not set
322
323#
324# Network testing
325#
326# CONFIG_NET_PKTGEN is not set
327# CONFIG_HAMRADIO is not set
328# CONFIG_CAN is not set
329# CONFIG_IRDA is not set
330# CONFIG_BT is not set
331# CONFIG_AF_RXRPC is not set
332
333#
334# Wireless
335#
336# CONFIG_CFG80211 is not set
337# CONFIG_WIRELESS_EXT is not set
338# CONFIG_MAC80211 is not set
339# CONFIG_IEEE80211 is not set
340# CONFIG_RFKILL is not set
341# CONFIG_NET_9P is not set
342
343#
344# Device Drivers
345#
346
347#
348# Generic Driver Options
349#
350CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
351CONFIG_STANDALONE=y
352CONFIG_PREVENT_FIRMWARE_BUILD=y
353CONFIG_FW_LOADER=y
354CONFIG_FIRMWARE_IN_KERNEL=y
355CONFIG_EXTRA_FIRMWARE=""
356# CONFIG_DEBUG_DRIVER is not set
357# CONFIG_DEBUG_DEVRES is not set
358# CONFIG_SYS_HYPERVISOR is not set
359CONFIG_CONNECTOR=y
360CONFIG_PROC_EVENTS=y
361CONFIG_MTD=y
362# CONFIG_MTD_DEBUG is not set
363# CONFIG_MTD_CONCAT is not set
364CONFIG_MTD_PARTITIONS=y
365# CONFIG_MTD_REDBOOT_PARTS is not set
366CONFIG_MTD_CMDLINE_PARTS=y
367CONFIG_MTD_OF_PARTS=y
368# CONFIG_MTD_AR7_PARTS is not set
369
370#
371# User Modules And Translation Layers
372#
373CONFIG_MTD_CHAR=y
374CONFIG_MTD_BLKDEVS=m
375CONFIG_MTD_BLOCK=m
376# CONFIG_MTD_BLOCK_RO is not set
377# CONFIG_FTL is not set
378# CONFIG_NFTL is not set
379# CONFIG_INFTL is not set
380# CONFIG_RFD_FTL is not set
381# CONFIG_SSFDC is not set
382# CONFIG_MTD_OOPS is not set
383
384#
385# RAM/ROM/Flash chip drivers
386#
387CONFIG_MTD_CFI=y
388CONFIG_MTD_JEDECPROBE=y
389CONFIG_MTD_GEN_PROBE=y
390# CONFIG_MTD_CFI_ADV_OPTIONS is not set
391CONFIG_MTD_MAP_BANK_WIDTH_1=y
392CONFIG_MTD_MAP_BANK_WIDTH_2=y
393CONFIG_MTD_MAP_BANK_WIDTH_4=y
394# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
395# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
396# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
397CONFIG_MTD_CFI_I1=y
398CONFIG_MTD_CFI_I2=y
399# CONFIG_MTD_CFI_I4 is not set
400# CONFIG_MTD_CFI_I8 is not set
401# CONFIG_MTD_CFI_INTELEXT is not set
402CONFIG_MTD_CFI_AMDSTD=y
403# CONFIG_MTD_CFI_STAA is not set
404CONFIG_MTD_CFI_UTIL=y
405# CONFIG_MTD_RAM is not set
406# CONFIG_MTD_ROM is not set
407# CONFIG_MTD_ABSENT is not set
408
409#
410# Mapping drivers for chip access
411#
412# CONFIG_MTD_COMPLEX_MAPPINGS is not set
413# CONFIG_MTD_PHYSMAP is not set
414CONFIG_MTD_PHYSMAP_OF=y
415# CONFIG_MTD_INTEL_VR_NOR is not set
416# CONFIG_MTD_PLATRAM is not set
417
418#
419# Self-contained MTD device drivers
420#
421# CONFIG_MTD_PMC551 is not set
422# CONFIG_MTD_SLRAM is not set
423# CONFIG_MTD_PHRAM is not set
424# CONFIG_MTD_MTDRAM is not set
425# CONFIG_MTD_BLOCK2MTD is not set
426
427#
428# Disk-On-Chip Device Drivers
429#
430# CONFIG_MTD_DOC2000 is not set
431# CONFIG_MTD_DOC2001 is not set
432# CONFIG_MTD_DOC2001PLUS is not set
433# CONFIG_MTD_NAND is not set
434# CONFIG_MTD_ONENAND is not set
435
436#
437# UBI - Unsorted block images
438#
439# CONFIG_MTD_UBI is not set
440CONFIG_OF_DEVICE=y
441# CONFIG_PARPORT is not set
442CONFIG_BLK_DEV=y
443# CONFIG_BLK_DEV_FD is not set
444# CONFIG_BLK_CPQ_DA is not set
445# CONFIG_BLK_CPQ_CISS_DA is not set
446# CONFIG_BLK_DEV_DAC960 is not set
447# CONFIG_BLK_DEV_UMEM is not set
448# CONFIG_BLK_DEV_COW_COMMON is not set
449# CONFIG_BLK_DEV_LOOP is not set
450# CONFIG_BLK_DEV_NBD is not set
451# CONFIG_BLK_DEV_SX8 is not set
452CONFIG_BLK_DEV_RAM=y
453CONFIG_BLK_DEV_RAM_COUNT=16
454CONFIG_BLK_DEV_RAM_SIZE=35000
455# CONFIG_BLK_DEV_XIP is not set
456# CONFIG_CDROM_PKTCDVD is not set
457# CONFIG_ATA_OVER_ETH is not set
458# CONFIG_XILINX_SYSACE is not set
459# CONFIG_BLK_DEV_HD is not set
460# CONFIG_MISC_DEVICES is not set
461CONFIG_HAVE_IDE=y
462# CONFIG_IDE is not set
463
464#
465# SCSI device support
466#
467# CONFIG_RAID_ATTRS is not set
468# CONFIG_SCSI is not set
469# CONFIG_SCSI_DMA is not set
470# CONFIG_SCSI_NETLINK is not set
471# CONFIG_ATA is not set
472# CONFIG_MD is not set
473# CONFIG_FUSION is not set
474
475#
476# IEEE 1394 (FireWire) support
477#
478
479#
480# Enable only one of the two stacks, unless you know what you are doing
481#
482# CONFIG_FIREWIRE is not set
483# CONFIG_IEEE1394 is not set
484# CONFIG_I2O is not set
485# CONFIG_MACINTOSH_DRIVERS is not set
486CONFIG_NETDEVICES=y
487# CONFIG_DUMMY is not set
488# CONFIG_BONDING is not set
489# CONFIG_MACVLAN is not set
490# CONFIG_EQUALIZER is not set
491# CONFIG_TUN is not set
492# CONFIG_VETH is not set
493# CONFIG_ARCNET is not set
494# CONFIG_PHYLIB is not set
495CONFIG_NET_ETHERNET=y
496CONFIG_MII=y
497# CONFIG_HAPPYMEAL is not set
498# CONFIG_SUNGEM is not set
499# CONFIG_CASSINI is not set
500# CONFIG_NET_VENDOR_3COM is not set
501# CONFIG_NET_TULIP is not set
502# CONFIG_HP100 is not set
503CONFIG_IBM_NEW_EMAC=y
504CONFIG_IBM_NEW_EMAC_RXB=256
505CONFIG_IBM_NEW_EMAC_TXB=256
506CONFIG_IBM_NEW_EMAC_POLL_WEIGHT=32
507CONFIG_IBM_NEW_EMAC_RX_COPY_THRESHOLD=256
508CONFIG_IBM_NEW_EMAC_RX_SKB_HEADROOM=0
509CONFIG_IBM_NEW_EMAC_DEBUG=y
510# CONFIG_IBM_NEW_EMAC_ZMII is not set
511# CONFIG_IBM_NEW_EMAC_RGMII is not set
512# CONFIG_IBM_NEW_EMAC_TAH is not set
513# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
514CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL=y
515CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT=y
516CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR=y
517# CONFIG_NET_PCI is not set
518# CONFIG_B44 is not set
519# CONFIG_NETDEV_1000 is not set
520# CONFIG_NETDEV_10000 is not set
521# CONFIG_TR is not set
522
523#
524# Wireless LAN
525#
526# CONFIG_WLAN_PRE80211 is not set
527# CONFIG_WLAN_80211 is not set
528# CONFIG_IWLWIFI_LEDS is not set
529# CONFIG_WAN is not set
530# CONFIG_FDDI is not set
531# CONFIG_HIPPI is not set
532# CONFIG_PPP is not set
533# CONFIG_SLIP is not set
534# CONFIG_NETCONSOLE is not set
535# CONFIG_NETPOLL is not set
536# CONFIG_NET_POLL_CONTROLLER is not set
537# CONFIG_ISDN is not set
538# CONFIG_PHONE is not set
539
540#
541# Input device support
542#
543# CONFIG_INPUT is not set
544
545#
546# Hardware I/O ports
547#
548# CONFIG_SERIO is not set
549# CONFIG_GAMEPORT is not set
550
551#
552# Character devices
553#
554# CONFIG_VT is not set
555CONFIG_DEVKMEM=y
556# CONFIG_SERIAL_NONSTANDARD is not set
557# CONFIG_NOZOMI is not set
558
559#
560# Serial drivers
561#
562CONFIG_SERIAL_8250=y
563CONFIG_SERIAL_8250_CONSOLE=y
564CONFIG_SERIAL_8250_PCI=y
565CONFIG_SERIAL_8250_NR_UARTS=4
566CONFIG_SERIAL_8250_RUNTIME_UARTS=4
567CONFIG_SERIAL_8250_EXTENDED=y
568# CONFIG_SERIAL_8250_MANY_PORTS is not set
569CONFIG_SERIAL_8250_SHARE_IRQ=y
570# CONFIG_SERIAL_8250_DETECT_IRQ is not set
571# CONFIG_SERIAL_8250_RSA is not set
572
573#
574# Non-8250 serial port support
575#
576# CONFIG_SERIAL_UARTLITE is not set
577CONFIG_SERIAL_CORE=y
578CONFIG_SERIAL_CORE_CONSOLE=y
579# CONFIG_SERIAL_JSM is not set
580CONFIG_SERIAL_OF_PLATFORM=y
581CONFIG_UNIX98_PTYS=y
582CONFIG_LEGACY_PTYS=y
583CONFIG_LEGACY_PTY_COUNT=256
584# CONFIG_IPMI_HANDLER is not set
585# CONFIG_HW_RANDOM is not set
586# CONFIG_NVRAM is not set
587# CONFIG_GEN_RTC is not set
588# CONFIG_R3964 is not set
589# CONFIG_APPLICOM is not set
590# CONFIG_RAW_DRIVER is not set
591# CONFIG_TCG_TPM is not set
592CONFIG_DEVPORT=y
593# CONFIG_I2C is not set
594# CONFIG_SPI is not set
595CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
596# CONFIG_GPIOLIB is not set
597# CONFIG_W1 is not set
598# CONFIG_POWER_SUPPLY is not set
599# CONFIG_HWMON is not set
600CONFIG_THERMAL=y
601# CONFIG_WATCHDOG is not set
602
603#
604# Sonics Silicon Backplane
605#
606CONFIG_SSB_POSSIBLE=y
607# CONFIG_SSB is not set
608
609#
610# Multifunction device drivers
611#
612# CONFIG_MFD_CORE is not set
613# CONFIG_MFD_SM501 is not set
614# CONFIG_HTC_PASIC3 is not set
615# CONFIG_MFD_TMIO is not set
616
617#
618# Multimedia devices
619#
620
621#
622# Multimedia core support
623#
624# CONFIG_VIDEO_DEV is not set
625# CONFIG_DVB_CORE is not set
626# CONFIG_VIDEO_MEDIA is not set
627
628#
629# Multimedia drivers
630#
631# CONFIG_DAB is not set
632
633#
634# Graphics support
635#
636# CONFIG_AGP is not set
637# CONFIG_DRM is not set
638# CONFIG_VGASTATE is not set
639# CONFIG_VIDEO_OUTPUT_CONTROL is not set
640# CONFIG_FB is not set
641# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
642
643#
644# Display device support
645#
646# CONFIG_DISPLAY_SUPPORT is not set
647# CONFIG_SOUND is not set
648# CONFIG_USB_SUPPORT is not set
649# CONFIG_MMC is not set
650# CONFIG_MEMSTICK is not set
651# CONFIG_NEW_LEDS is not set
652# CONFIG_ACCESSIBILITY is not set
653# CONFIG_INFINIBAND is not set
654# CONFIG_EDAC is not set
655# CONFIG_RTC_CLASS is not set
656# CONFIG_DMADEVICES is not set
657# CONFIG_UIO is not set
658
659#
660# File systems
661#
662CONFIG_EXT2_FS=y
663# CONFIG_EXT2_FS_XATTR is not set
664# CONFIG_EXT2_FS_XIP is not set
665# CONFIG_EXT3_FS is not set
666# CONFIG_EXT4DEV_FS is not set
667# CONFIG_REISERFS_FS is not set
668# CONFIG_JFS_FS is not set
669# CONFIG_FS_POSIX_ACL is not set
670# CONFIG_XFS_FS is not set
671# CONFIG_OCFS2_FS is not set
672CONFIG_DNOTIFY=y
673CONFIG_INOTIFY=y
674CONFIG_INOTIFY_USER=y
675# CONFIG_QUOTA is not set
676# CONFIG_AUTOFS_FS is not set
677# CONFIG_AUTOFS4_FS is not set
678# CONFIG_FUSE_FS is not set
679
680#
681# CD-ROM/DVD Filesystems
682#
683# CONFIG_ISO9660_FS is not set
684# CONFIG_UDF_FS is not set
685
686#
687# DOS/FAT/NT Filesystems
688#
689# CONFIG_MSDOS_FS is not set
690# CONFIG_VFAT_FS is not set
691# CONFIG_NTFS_FS is not set
692
693#
694# Pseudo filesystems
695#
696CONFIG_PROC_FS=y
697CONFIG_PROC_KCORE=y
698CONFIG_PROC_SYSCTL=y
699CONFIG_SYSFS=y
700CONFIG_TMPFS=y
701# CONFIG_TMPFS_POSIX_ACL is not set
702# CONFIG_HUGETLB_PAGE is not set
703# CONFIG_CONFIGFS_FS is not set
704
705#
706# Miscellaneous filesystems
707#
708# CONFIG_ADFS_FS is not set
709# CONFIG_AFFS_FS is not set
710# CONFIG_HFS_FS is not set
711# CONFIG_HFSPLUS_FS is not set
712# CONFIG_BEFS_FS is not set
713# CONFIG_BFS_FS is not set
714# CONFIG_EFS_FS is not set
715# CONFIG_JFFS2_FS is not set
716CONFIG_CRAMFS=y
717# CONFIG_VXFS_FS is not set
718# CONFIG_MINIX_FS is not set
719# CONFIG_OMFS_FS is not set
720# CONFIG_HPFS_FS is not set
721# CONFIG_QNX4FS_FS is not set
722# CONFIG_ROMFS_FS is not set
723# CONFIG_SYSV_FS is not set
724# CONFIG_UFS_FS is not set
725CONFIG_NETWORK_FILESYSTEMS=y
726CONFIG_NFS_FS=y
727CONFIG_NFS_V3=y
728# CONFIG_NFS_V3_ACL is not set
729# CONFIG_NFS_V4 is not set
730CONFIG_ROOT_NFS=y
731# CONFIG_NFSD is not set
732CONFIG_LOCKD=y
733CONFIG_LOCKD_V4=y
734CONFIG_NFS_COMMON=y
735CONFIG_SUNRPC=y
736# CONFIG_RPCSEC_GSS_KRB5 is not set
737# CONFIG_RPCSEC_GSS_SPKM3 is not set
738# CONFIG_SMB_FS is not set
739# CONFIG_CIFS is not set
740# CONFIG_NCP_FS is not set
741# CONFIG_CODA_FS is not set
742# CONFIG_AFS_FS is not set
743
744#
745# Partition Types
746#
747# CONFIG_PARTITION_ADVANCED is not set
748CONFIG_MSDOS_PARTITION=y
749# CONFIG_NLS is not set
750# CONFIG_DLM is not set
751
752#
753# Library routines
754#
755CONFIG_BITREVERSE=y
756# CONFIG_GENERIC_FIND_FIRST_BIT is not set
757# CONFIG_CRC_CCITT is not set
758# CONFIG_CRC16 is not set
759# CONFIG_CRC_T10DIF is not set
760# CONFIG_CRC_ITU_T is not set
761CONFIG_CRC32=y
762# CONFIG_CRC7 is not set
763# CONFIG_LIBCRC32C is not set
764CONFIG_ZLIB_INFLATE=y
765CONFIG_PLIST=y
766CONFIG_HAS_IOMEM=y
767CONFIG_HAS_IOPORT=y
768CONFIG_HAS_DMA=y
769CONFIG_HAVE_LMB=y
770
771#
772# Kernel hacking
773#
774# CONFIG_PRINTK_TIME is not set
775CONFIG_ENABLE_WARN_DEPRECATED=y
776CONFIG_ENABLE_MUST_CHECK=y
777CONFIG_FRAME_WARN=1024
778CONFIG_MAGIC_SYSRQ=y
779# CONFIG_UNUSED_SYMBOLS is not set
780CONFIG_DEBUG_FS=y
781# CONFIG_HEADERS_CHECK is not set
782CONFIG_DEBUG_KERNEL=y
783# CONFIG_DEBUG_SHIRQ is not set
784CONFIG_DETECT_SOFTLOCKUP=y
785# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
786CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
787CONFIG_SCHED_DEBUG=y
788# CONFIG_SCHEDSTATS is not set
789# CONFIG_TIMER_STATS is not set
790# CONFIG_DEBUG_OBJECTS is not set
791# CONFIG_SLUB_DEBUG_ON is not set
792# CONFIG_SLUB_STATS is not set
793# CONFIG_DEBUG_RT_MUTEXES is not set
794# CONFIG_RT_MUTEX_TESTER is not set
795# CONFIG_DEBUG_SPINLOCK is not set
796# CONFIG_DEBUG_MUTEXES is not set
797# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
798# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
799# CONFIG_DEBUG_KOBJECT is not set
800CONFIG_DEBUG_BUGVERBOSE=y
801# CONFIG_DEBUG_INFO is not set
802# CONFIG_DEBUG_VM is not set
803# CONFIG_DEBUG_WRITECOUNT is not set
804# CONFIG_DEBUG_MEMORY_INIT is not set
805# CONFIG_DEBUG_LIST is not set
806# CONFIG_DEBUG_SG is not set
807# CONFIG_BOOT_PRINTK_DELAY is not set
808# CONFIG_RCU_TORTURE_TEST is not set
809# CONFIG_BACKTRACE_SELF_TEST is not set
810# CONFIG_FAULT_INJECTION is not set
811# CONFIG_LATENCYTOP is not set
812CONFIG_SYSCTL_SYSCALL_CHECK=y
813CONFIG_HAVE_FTRACE=y
814CONFIG_HAVE_DYNAMIC_FTRACE=y
815# CONFIG_FTRACE is not set
816# CONFIG_SCHED_TRACER is not set
817# CONFIG_CONTEXT_SWITCH_TRACER is not set
818# CONFIG_SAMPLES is not set
819CONFIG_HAVE_ARCH_KGDB=y
820# CONFIG_KGDB is not set
821# CONFIG_DEBUG_STACKOVERFLOW is not set
822# CONFIG_DEBUG_STACK_USAGE is not set
823# CONFIG_DEBUG_PAGEALLOC is not set
824# CONFIG_CODE_PATCHING_SELFTEST is not set
825# CONFIG_FTR_FIXUP_SELFTEST is not set
826# CONFIG_MSI_BITMAP_SELFTEST is not set
827# CONFIG_XMON is not set
828# CONFIG_IRQSTACKS is not set
829# CONFIG_VIRQ_DEBUG is not set
830# CONFIG_BDI_SWITCH is not set
831# CONFIG_PPC_EARLY_DEBUG is not set
832
833#
834# Security options
835#
836# CONFIG_KEYS is not set
837# CONFIG_SECURITY is not set
838# CONFIG_SECURITY_FILE_CAPABILITIES is not set
839CONFIG_CRYPTO=y
840
841#
842# Crypto core or helper
843#
844CONFIG_CRYPTO_ALGAPI=y
845CONFIG_CRYPTO_BLKCIPHER=y
846CONFIG_CRYPTO_MANAGER=y
847# CONFIG_CRYPTO_GF128MUL is not set
848# CONFIG_CRYPTO_NULL is not set
849# CONFIG_CRYPTO_CRYPTD is not set
850# CONFIG_CRYPTO_AUTHENC is not set
851# CONFIG_CRYPTO_TEST is not set
852
853#
854# Authenticated Encryption with Associated Data
855#
856# CONFIG_CRYPTO_CCM is not set
857# CONFIG_CRYPTO_GCM is not set
858# CONFIG_CRYPTO_SEQIV is not set
859
860#
861# Block modes
862#
863CONFIG_CRYPTO_CBC=y
864# CONFIG_CRYPTO_CTR is not set
865# CONFIG_CRYPTO_CTS is not set
866CONFIG_CRYPTO_ECB=y
867# CONFIG_CRYPTO_LRW is not set
868CONFIG_CRYPTO_PCBC=y
869# CONFIG_CRYPTO_XTS is not set
870
871#
872# Hash modes
873#
874# CONFIG_CRYPTO_HMAC is not set
875# CONFIG_CRYPTO_XCBC is not set
876
877#
878# Digest
879#
880# CONFIG_CRYPTO_CRC32C is not set
881# CONFIG_CRYPTO_MD4 is not set
882CONFIG_CRYPTO_MD5=y
883# CONFIG_CRYPTO_MICHAEL_MIC is not set
884# CONFIG_CRYPTO_RMD128 is not set
885# CONFIG_CRYPTO_RMD160 is not set
886# CONFIG_CRYPTO_RMD256 is not set
887# CONFIG_CRYPTO_RMD320 is not set
888# CONFIG_CRYPTO_SHA1 is not set
889# CONFIG_CRYPTO_SHA256 is not set
890# CONFIG_CRYPTO_SHA512 is not set
891# CONFIG_CRYPTO_TGR192 is not set
892# CONFIG_CRYPTO_WP512 is not set
893
894#
895# Ciphers
896#
897# CONFIG_CRYPTO_AES is not set
898# CONFIG_CRYPTO_ANUBIS is not set
899# CONFIG_CRYPTO_ARC4 is not set
900# CONFIG_CRYPTO_BLOWFISH is not set
901# CONFIG_CRYPTO_CAMELLIA is not set
902# CONFIG_CRYPTO_CAST5 is not set
903# CONFIG_CRYPTO_CAST6 is not set
904CONFIG_CRYPTO_DES=y
905# CONFIG_CRYPTO_FCRYPT is not set
906# CONFIG_CRYPTO_KHAZAD is not set
907# CONFIG_CRYPTO_SALSA20 is not set
908# CONFIG_CRYPTO_SEED is not set
909# CONFIG_CRYPTO_SERPENT is not set
910# CONFIG_CRYPTO_TEA is not set
911# CONFIG_CRYPTO_TWOFISH is not set
912
913#
914# Compression
915#
916# CONFIG_CRYPTO_DEFLATE is not set
917# CONFIG_CRYPTO_LZO is not set
918CONFIG_CRYPTO_HW=y
919# CONFIG_CRYPTO_DEV_HIFN_795X is not set
920# CONFIG_PPC_CLOCK is not set
921# CONFIG_VIRTUALIZATION is not set
diff --git a/arch/powerpc/configs/40x/hcu4_defconfig b/arch/powerpc/configs/40x/hcu4_defconfig
new file mode 100644
index 000000000000..682fce02c73a
--- /dev/null
+++ b/arch/powerpc/configs/40x/hcu4_defconfig
@@ -0,0 +1,929 @@
1#
2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.26.5
4# Tue Sep 16 00:44:33 2008
5#
6# CONFIG_PPC64 is not set
7
8#
9# Processor support
10#
11# CONFIG_6xx is not set
12# CONFIG_PPC_85xx is not set
13# CONFIG_PPC_8xx is not set
14CONFIG_40x=y
15# CONFIG_44x is not set
16# CONFIG_E200 is not set
17CONFIG_4xx=y
18# CONFIG_PPC_MM_SLICES is not set
19CONFIG_NOT_COHERENT_CACHE=y
20CONFIG_PPC32=y
21CONFIG_WORD_SIZE=32
22CONFIG_PPC_MERGE=y
23CONFIG_MMU=y
24CONFIG_GENERIC_CMOS_UPDATE=y
25CONFIG_GENERIC_TIME=y
26CONFIG_GENERIC_TIME_VSYSCALL=y
27CONFIG_GENERIC_CLOCKEVENTS=y
28CONFIG_GENERIC_HARDIRQS=y
29# CONFIG_HAVE_SETUP_PER_CPU_AREA is not set
30CONFIG_IRQ_PER_CPU=y
31CONFIG_STACKTRACE_SUPPORT=y
32CONFIG_LOCKDEP_SUPPORT=y
33CONFIG_RWSEM_XCHGADD_ALGORITHM=y
34CONFIG_ARCH_HAS_ILOG2_U32=y
35CONFIG_GENERIC_HWEIGHT=y
36CONFIG_GENERIC_CALIBRATE_DELAY=y
37CONFIG_GENERIC_FIND_NEXT_BIT=y
38# CONFIG_ARCH_NO_VIRT_TO_BUS is not set
39CONFIG_PPC=y
40CONFIG_EARLY_PRINTK=y
41CONFIG_GENERIC_NVRAM=y
42CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
43CONFIG_ARCH_MAY_HAVE_PC_FDC=y
44CONFIG_PPC_OF=y
45CONFIG_OF=y
46CONFIG_PPC_UDBG_16550=y
47# CONFIG_GENERIC_TBSYNC is not set
48CONFIG_AUDIT_ARCH=y
49CONFIG_GENERIC_BUG=y
50# CONFIG_DEFAULT_UIMAGE is not set
51CONFIG_PPC_DCR_NATIVE=y
52# CONFIG_PPC_DCR_MMIO is not set
53CONFIG_PPC_DCR=y
54CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
55
56#
57# General setup
58#
59CONFIG_EXPERIMENTAL=y
60CONFIG_BROKEN_ON_SMP=y
61CONFIG_INIT_ENV_ARG_LIMIT=32
62CONFIG_LOCALVERSION=""
63CONFIG_LOCALVERSION_AUTO=y
64CONFIG_SWAP=y
65CONFIG_SYSVIPC=y
66CONFIG_SYSVIPC_SYSCTL=y
67CONFIG_POSIX_MQUEUE=y
68# CONFIG_BSD_PROCESS_ACCT is not set
69# CONFIG_TASKSTATS is not set
70# CONFIG_AUDIT is not set
71# CONFIG_IKCONFIG is not set
72CONFIG_LOG_BUF_SHIFT=14
73# CONFIG_CGROUPS is not set
74CONFIG_GROUP_SCHED=y
75CONFIG_FAIR_GROUP_SCHED=y
76# CONFIG_RT_GROUP_SCHED is not set
77CONFIG_USER_SCHED=y
78# CONFIG_CGROUP_SCHED is not set
79CONFIG_SYSFS_DEPRECATED=y
80CONFIG_SYSFS_DEPRECATED_V2=y
81# CONFIG_RELAY is not set
82# CONFIG_NAMESPACES is not set
83CONFIG_BLK_DEV_INITRD=y
84CONFIG_INITRAMFS_SOURCE=""
85# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
86CONFIG_SYSCTL=y
87CONFIG_EMBEDDED=y
88CONFIG_SYSCTL_SYSCALL=y
89CONFIG_SYSCTL_SYSCALL_CHECK=y
90CONFIG_KALLSYMS=y
91CONFIG_KALLSYMS_ALL=y
92CONFIG_KALLSYMS_EXTRA_PASS=y
93CONFIG_HOTPLUG=y
94CONFIG_PRINTK=y
95# CONFIG_LOGBUFFER is not set
96CONFIG_BUG=y
97CONFIG_ELF_CORE=y
98CONFIG_COMPAT_BRK=y
99CONFIG_BASE_FULL=y
100CONFIG_FUTEX=y
101CONFIG_ANON_INODES=y
102CONFIG_EPOLL=y
103CONFIG_SIGNALFD=y
104CONFIG_TIMERFD=y
105CONFIG_EVENTFD=y
106CONFIG_SHMEM=y
107CONFIG_VM_EVENT_COUNTERS=y
108CONFIG_SLUB_DEBUG=y
109# CONFIG_SLAB is not set
110CONFIG_SLUB=y
111# CONFIG_SLOB is not set
112# CONFIG_PROFILING is not set
113# CONFIG_MARKERS is not set
114CONFIG_HAVE_OPROFILE=y
115# CONFIG_KPROBES is not set
116CONFIG_HAVE_KPROBES=y
117CONFIG_HAVE_KRETPROBES=y
118# CONFIG_HAVE_DMA_ATTRS is not set
119CONFIG_PROC_PAGE_MONITOR=y
120CONFIG_SLABINFO=y
121CONFIG_RT_MUTEXES=y
122# CONFIG_TINY_SHMEM is not set
123CONFIG_BASE_SMALL=0
124CONFIG_MODULES=y
125# CONFIG_MODULE_FORCE_LOAD is not set
126CONFIG_MODULE_UNLOAD=y
127# CONFIG_MODULE_FORCE_UNLOAD is not set
128# CONFIG_MODVERSIONS is not set
129# CONFIG_MODULE_SRCVERSION_ALL is not set
130CONFIG_KMOD=y
131CONFIG_BLOCK=y
132CONFIG_LBD=y
133# CONFIG_BLK_DEV_IO_TRACE is not set
134# CONFIG_LSF is not set
135# CONFIG_BLK_DEV_BSG is not set
136
137#
138# IO Schedulers
139#
140CONFIG_IOSCHED_NOOP=y
141CONFIG_IOSCHED_AS=y
142CONFIG_IOSCHED_DEADLINE=y
143CONFIG_IOSCHED_CFQ=y
144CONFIG_DEFAULT_AS=y
145# CONFIG_DEFAULT_DEADLINE is not set
146# CONFIG_DEFAULT_CFQ is not set
147# CONFIG_DEFAULT_NOOP is not set
148CONFIG_DEFAULT_IOSCHED="anticipatory"
149CONFIG_CLASSIC_RCU=y
150# CONFIG_PPC4xx_PCI_EXPRESS is not set
151
152#
153# Platform support
154#
155# CONFIG_PPC_MPC512x is not set
156# CONFIG_PPC_MPC5121 is not set
157# CONFIG_PPC_CELL is not set
158# CONFIG_PPC_CELL_NATIVE is not set
159# CONFIG_PQ2ADS is not set
160# CONFIG_EP405 is not set
161CONFIG_HCU4=y
162# CONFIG_KILAUEA is not set
163# CONFIG_MAKALU is not set
164# CONFIG_WALNUT is not set
165# CONFIG_XILINX_VIRTEX_GENERIC_BOARD is not set
166# CONFIG_IPIC is not set
167# CONFIG_MPIC is not set
168# CONFIG_MPIC_WEIRD is not set
169# CONFIG_PPC_I8259 is not set
170# CONFIG_PPC_RTAS is not set
171# CONFIG_MMIO_NVRAM is not set
172# CONFIG_PPC_MPC106 is not set
173# CONFIG_PPC_970_NAP is not set
174# CONFIG_PPC_INDIRECT_IO is not set
175# CONFIG_GENERIC_IOMAP is not set
176# CONFIG_CPU_FREQ is not set
177# CONFIG_FSL_ULI1575 is not set
178
179#
180# Kernel options
181#
182# CONFIG_HIGHMEM is not set
183# CONFIG_TICK_ONESHOT is not set
184# CONFIG_NO_HZ is not set
185# CONFIG_HIGH_RES_TIMERS is not set
186CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
187# CONFIG_HZ_100 is not set
188CONFIG_HZ_250=y
189# CONFIG_HZ_300 is not set
190# CONFIG_HZ_1000 is not set
191CONFIG_HZ=250
192# CONFIG_SCHED_HRTICK is not set
193CONFIG_PREEMPT_NONE=y
194# CONFIG_PREEMPT_VOLUNTARY is not set
195# CONFIG_PREEMPT is not set
196CONFIG_BINFMT_ELF=y
197# CONFIG_BINFMT_MISC is not set
198# CONFIG_MATH_EMULATION is not set
199# CONFIG_IOMMU_HELPER is not set
200CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
201CONFIG_ARCH_HAS_WALK_MEMORY=y
202CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
203CONFIG_ARCH_FLATMEM_ENABLE=y
204CONFIG_ARCH_POPULATES_NODE_MAP=y
205CONFIG_SELECT_MEMORY_MODEL=y
206CONFIG_FLATMEM_MANUAL=y
207# CONFIG_DISCONTIGMEM_MANUAL is not set
208# CONFIG_SPARSEMEM_MANUAL is not set
209CONFIG_FLATMEM=y
210CONFIG_FLAT_NODE_MEM_MAP=y
211# CONFIG_SPARSEMEM_STATIC is not set
212# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
213CONFIG_PAGEFLAGS_EXTENDED=y
214CONFIG_SPLIT_PTLOCK_CPUS=4
215CONFIG_RESOURCES_64BIT=y
216CONFIG_ZONE_DMA_FLAG=1
217CONFIG_BOUNCE=y
218CONFIG_VIRT_TO_BUS=y
219CONFIG_FORCE_MAX_ZONEORDER=11
220CONFIG_PROC_DEVICETREE=y
221# CONFIG_CMDLINE_BOOL is not set
222# CONFIG_PM is not set
223CONFIG_SECCOMP=y
224CONFIG_ISA_DMA_API=y
225
226#
227# Bus options
228#
229CONFIG_ZONE_DMA=y
230CONFIG_PPC_INDIRECT_PCI=y
231CONFIG_4xx_SOC=y
232CONFIG_PCI=y
233CONFIG_PCI_DOMAINS=y
234CONFIG_PCI_SYSCALL=y
235# CONFIG_PCIEPORTBUS is not set
236CONFIG_ARCH_SUPPORTS_MSI=y
237# CONFIG_PCI_MSI is not set
238# CONFIG_PCI_LEGACY is not set
239# CONFIG_PCI_DEBUG is not set
240# CONFIG_PCCARD is not set
241# CONFIG_HOTPLUG_PCI is not set
242# CONFIG_HAS_RAPIDIO is not set
243
244#
245# Advanced setup
246#
247# CONFIG_ADVANCED_OPTIONS is not set
248
249#
250# Default settings for advanced configuration options are used
251#
252CONFIG_LOWMEM_SIZE=0x30000000
253CONFIG_PAGE_OFFSET=0xc0000000
254CONFIG_KERNEL_START=0xc0000000
255CONFIG_PHYSICAL_START=0x00000000
256CONFIG_TASK_SIZE=0xc0000000
257CONFIG_CONSISTENT_START=0xff100000
258CONFIG_CONSISTENT_SIZE=0x00200000
259
260#
261# Networking
262#
263CONFIG_NET=y
264
265#
266# Networking options
267#
268CONFIG_PACKET=y
269# CONFIG_PACKET_MMAP is not set
270CONFIG_UNIX=y
271# CONFIG_NET_KEY is not set
272CONFIG_INET=y
273# CONFIG_IP_MULTICAST is not set
274# CONFIG_IP_ADVANCED_ROUTER is not set
275CONFIG_IP_FIB_HASH=y
276CONFIG_IP_PNP=y
277CONFIG_IP_PNP_DHCP=y
278CONFIG_IP_PNP_BOOTP=y
279# CONFIG_IP_PNP_RARP is not set
280# CONFIG_NET_IPIP is not set
281# CONFIG_NET_IPGRE is not set
282# CONFIG_ARPD is not set
283# CONFIG_SYN_COOKIES is not set
284# CONFIG_INET_AH is not set
285# CONFIG_INET_ESP is not set
286# CONFIG_INET_IPCOMP is not set
287# CONFIG_INET_XFRM_TUNNEL is not set
288# CONFIG_INET_TUNNEL is not set
289# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
290# CONFIG_INET_XFRM_MODE_TUNNEL is not set
291# CONFIG_INET_XFRM_MODE_BEET is not set
292# CONFIG_INET_LRO is not set
293CONFIG_INET_DIAG=y
294CONFIG_INET_TCP_DIAG=y
295# CONFIG_TCP_CONG_ADVANCED is not set
296CONFIG_TCP_CONG_CUBIC=y
297CONFIG_DEFAULT_TCP_CONG="cubic"
298# CONFIG_TCP_MD5SIG is not set
299# CONFIG_IPV6 is not set
300# CONFIG_NETWORK_SECMARK is not set
301# CONFIG_NETFILTER is not set
302# CONFIG_IP_DCCP is not set
303# CONFIG_IP_SCTP is not set
304# CONFIG_TIPC is not set
305# CONFIG_ATM is not set
306# CONFIG_BRIDGE is not set
307# CONFIG_VLAN_8021Q is not set
308# CONFIG_DECNET is not set
309# CONFIG_LLC2 is not set
310# CONFIG_IPX is not set
311# CONFIG_ATALK is not set
312# CONFIG_X25 is not set
313# CONFIG_LAPB is not set
314# CONFIG_ECONET is not set
315# CONFIG_WAN_ROUTER is not set
316# CONFIG_NET_SCHED is not set
317
318#
319# Network testing
320#
321# CONFIG_NET_PKTGEN is not set
322# CONFIG_HAMRADIO is not set
323# CONFIG_CAN is not set
324# CONFIG_IRDA is not set
325# CONFIG_BT is not set
326# CONFIG_AF_RXRPC is not set
327
328#
329# Wireless
330#
331# CONFIG_CFG80211 is not set
332# CONFIG_WIRELESS_EXT is not set
333# CONFIG_MAC80211 is not set
334# CONFIG_IEEE80211 is not set
335# CONFIG_RFKILL is not set
336# CONFIG_NET_9P is not set
337
338#
339# Device Drivers
340#
341
342#
343# Generic Driver Options
344#
345CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
346CONFIG_STANDALONE=y
347CONFIG_PREVENT_FIRMWARE_BUILD=y
348CONFIG_FW_LOADER=y
349# CONFIG_DEBUG_DRIVER is not set
350# CONFIG_DEBUG_DEVRES is not set
351# CONFIG_SYS_HYPERVISOR is not set
352CONFIG_CONNECTOR=y
353CONFIG_PROC_EVENTS=y
354CONFIG_MTD=y
355# CONFIG_MTD_DEBUG is not set
356# CONFIG_MTD_CONCAT is not set
357CONFIG_MTD_PARTITIONS=y
358# CONFIG_MTD_REDBOOT_PARTS is not set
359CONFIG_MTD_CMDLINE_PARTS=y
360CONFIG_MTD_OF_PARTS=y
361# CONFIG_MTD_AR7_PARTS is not set
362
363#
364# User Modules And Translation Layers
365#
366CONFIG_MTD_CHAR=y
367CONFIG_MTD_BLKDEVS=m
368CONFIG_MTD_BLOCK=m
369# CONFIG_MTD_BLOCK_RO is not set
370# CONFIG_FTL is not set
371# CONFIG_NFTL is not set
372# CONFIG_INFTL is not set
373# CONFIG_RFD_FTL is not set
374# CONFIG_SSFDC is not set
375# CONFIG_MTD_OOPS is not set
376
377#
378# RAM/ROM/Flash chip drivers
379#
380CONFIG_MTD_CFI=y
381CONFIG_MTD_JEDECPROBE=y
382CONFIG_MTD_GEN_PROBE=y
383# CONFIG_MTD_CFI_ADV_OPTIONS is not set
384CONFIG_MTD_MAP_BANK_WIDTH_1=y
385CONFIG_MTD_MAP_BANK_WIDTH_2=y
386CONFIG_MTD_MAP_BANK_WIDTH_4=y
387# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
388# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
389# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
390CONFIG_MTD_CFI_I1=y
391CONFIG_MTD_CFI_I2=y
392# CONFIG_MTD_CFI_I4 is not set
393# CONFIG_MTD_CFI_I8 is not set
394# CONFIG_MTD_CFI_INTELEXT is not set
395CONFIG_MTD_CFI_AMDSTD=y
396# CONFIG_MTD_CFI_STAA is not set
397CONFIG_MTD_CFI_UTIL=y
398# CONFIG_MTD_RAM is not set
399# CONFIG_MTD_ROM is not set
400# CONFIG_MTD_ABSENT is not set
401
402#
403# Mapping drivers for chip access
404#
405# CONFIG_MTD_COMPLEX_MAPPINGS is not set
406# CONFIG_MTD_PHYSMAP is not set
407CONFIG_MTD_PHYSMAP_OF=y
408# CONFIG_MTD_INTEL_VR_NOR is not set
409# CONFIG_MTD_PLATRAM is not set
410
411#
412# Self-contained MTD device drivers
413#
414# CONFIG_MTD_PMC551 is not set
415# CONFIG_MTD_SLRAM is not set
416# CONFIG_MTD_PHRAM is not set
417# CONFIG_MTD_MTDRAM is not set
418# CONFIG_MTD_BLOCK2MTD is not set
419
420#
421# Disk-On-Chip Device Drivers
422#
423# CONFIG_MTD_DOC2000 is not set
424# CONFIG_MTD_DOC2001 is not set
425# CONFIG_MTD_DOC2001PLUS is not set
426# CONFIG_MTD_NAND is not set
427# CONFIG_MTD_ONENAND is not set
428
429#
430# UBI - Unsorted block images
431#
432# CONFIG_MTD_UBI is not set
433CONFIG_OF_DEVICE=y
434# CONFIG_PARPORT is not set
435CONFIG_BLK_DEV=y
436# CONFIG_BLK_DEV_FD is not set
437# CONFIG_BLK_CPQ_DA is not set
438# CONFIG_BLK_CPQ_CISS_DA is not set
439# CONFIG_BLK_DEV_DAC960 is not set
440# CONFIG_BLK_DEV_UMEM is not set
441# CONFIG_BLK_DEV_COW_COMMON is not set
442# CONFIG_BLK_DEV_LOOP is not set
443# CONFIG_BLK_DEV_NBD is not set
444# CONFIG_BLK_DEV_SX8 is not set
445CONFIG_BLK_DEV_RAM=y
446CONFIG_BLK_DEV_RAM_COUNT=16
447CONFIG_BLK_DEV_RAM_SIZE=35000
448# CONFIG_BLK_DEV_XIP is not set
449# CONFIG_CDROM_PKTCDVD is not set
450# CONFIG_ATA_OVER_ETH is not set
451# CONFIG_XILINX_SYSACE is not set
452CONFIG_MISC_DEVICES=y
453# CONFIG_PHANTOM is not set
454# CONFIG_EEPROM_93CX6 is not set
455# CONFIG_SGI_IOC4 is not set
456# CONFIG_TIFM_CORE is not set
457# CONFIG_ENCLOSURE_SERVICES is not set
458CONFIG_HAVE_IDE=y
459# CONFIG_IDE is not set
460
461#
462# SCSI device support
463#
464# CONFIG_RAID_ATTRS is not set
465# CONFIG_SCSI is not set
466# CONFIG_SCSI_DMA is not set
467# CONFIG_SCSI_NETLINK is not set
468# CONFIG_ATA is not set
469# CONFIG_MD is not set
470# CONFIG_FUSION is not set
471
472#
473# IEEE 1394 (FireWire) support
474#
475
476#
477# Enable only one of the two stacks, unless you know what you are doing
478#
479# CONFIG_FIREWIRE is not set
480# CONFIG_IEEE1394 is not set
481# CONFIG_I2O is not set
482# CONFIG_MACINTOSH_DRIVERS is not set
483CONFIG_NETDEVICES=y
484# CONFIG_NETDEVICES_MULTIQUEUE is not set
485# CONFIG_DUMMY is not set
486# CONFIG_BONDING is not set
487# CONFIG_MACVLAN is not set
488# CONFIG_EQUALIZER is not set
489# CONFIG_TUN is not set
490# CONFIG_VETH is not set
491# CONFIG_ARCNET is not set
492# CONFIG_PHYLIB is not set
493CONFIG_NET_ETHERNET=y
494# CONFIG_MII is not set
495# CONFIG_HAPPYMEAL is not set
496# CONFIG_SUNGEM is not set
497# CONFIG_CASSINI is not set
498# CONFIG_NET_VENDOR_3COM is not set
499# CONFIG_NET_TULIP is not set
500# CONFIG_HP100 is not set
501CONFIG_IBM_NEW_EMAC=y
502CONFIG_IBM_NEW_EMAC_RXB=128
503CONFIG_IBM_NEW_EMAC_TXB=64
504CONFIG_IBM_NEW_EMAC_POLL_WEIGHT=32
505CONFIG_IBM_NEW_EMAC_RX_COPY_THRESHOLD=256
506CONFIG_IBM_NEW_EMAC_RX_SKB_HEADROOM=0
507# CONFIG_IBM_NEW_EMAC_DEBUG is not set
508# CONFIG_IBM_NEW_EMAC_ZMII is not set
509# CONFIG_IBM_NEW_EMAC_RGMII is not set
510# CONFIG_IBM_NEW_EMAC_TAH is not set
511# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
512# CONFIG_NET_PCI is not set
513# CONFIG_B44 is not set
514CONFIG_NETDEV_1000=y
515# CONFIG_ACENIC is not set
516# CONFIG_DL2K is not set
517# CONFIG_E1000 is not set
518# CONFIG_E1000E is not set
519# CONFIG_E1000E_ENABLED is not set
520# CONFIG_IP1000 is not set
521# CONFIG_IGB is not set
522# CONFIG_NS83820 is not set
523# CONFIG_HAMACHI is not set
524# CONFIG_YELLOWFIN is not set
525# CONFIG_R8169 is not set
526# CONFIG_SIS190 is not set
527# CONFIG_SKGE is not set
528# CONFIG_SKY2 is not set
529# CONFIG_VIA_VELOCITY is not set
530# CONFIG_TIGON3 is not set
531# CONFIG_BNX2 is not set
532# CONFIG_QLA3XXX is not set
533# CONFIG_ATL1 is not set
534CONFIG_NETDEV_10000=y
535# CONFIG_CHELSIO_T1 is not set
536# CONFIG_CHELSIO_T3 is not set
537# CONFIG_IXGBE is not set
538# CONFIG_IXGB is not set
539# CONFIG_S2IO is not set
540# CONFIG_MYRI10GE is not set
541# CONFIG_NETXEN_NIC is not set
542# CONFIG_NIU is not set
543# CONFIG_MLX4_CORE is not set
544# CONFIG_TEHUTI is not set
545# CONFIG_BNX2X is not set
546# CONFIG_SFC is not set
547# CONFIG_TR is not set
548
549#
550# Wireless LAN
551#
552# CONFIG_WLAN_PRE80211 is not set
553# CONFIG_WLAN_80211 is not set
554# CONFIG_IWLWIFI_LEDS is not set
555# CONFIG_WAN is not set
556# CONFIG_FDDI is not set
557# CONFIG_HIPPI is not set
558# CONFIG_PPP is not set
559# CONFIG_SLIP is not set
560# CONFIG_NETCONSOLE is not set
561# CONFIG_NETPOLL is not set
562# CONFIG_NET_POLL_CONTROLLER is not set
563# CONFIG_ISDN is not set
564# CONFIG_PHONE is not set
565
566#
567# Input device support
568#
569# CONFIG_INPUT is not set
570
571#
572# Hardware I/O ports
573#
574# CONFIG_SERIO is not set
575# CONFIG_GAMEPORT is not set
576
577#
578# Character devices
579#
580# CONFIG_VT is not set
581CONFIG_DEVKMEM=y
582# CONFIG_SERIAL_NONSTANDARD is not set
583# CONFIG_NOZOMI is not set
584
585#
586# Serial drivers
587#
588CONFIG_SERIAL_8250=y
589CONFIG_SERIAL_8250_CONSOLE=y
590CONFIG_SERIAL_8250_PCI=y
591CONFIG_SERIAL_8250_NR_UARTS=4
592CONFIG_SERIAL_8250_RUNTIME_UARTS=4
593CONFIG_SERIAL_8250_EXTENDED=y
594# CONFIG_SERIAL_8250_MANY_PORTS is not set
595CONFIG_SERIAL_8250_SHARE_IRQ=y
596# CONFIG_SERIAL_8250_DETECT_IRQ is not set
597# CONFIG_SERIAL_8250_RSA is not set
598
599#
600# Non-8250 serial port support
601#
602# CONFIG_SERIAL_UARTLITE is not set
603CONFIG_SERIAL_CORE=y
604CONFIG_SERIAL_CORE_CONSOLE=y
605# CONFIG_SERIAL_JSM is not set
606CONFIG_SERIAL_OF_PLATFORM=y
607CONFIG_UNIX98_PTYS=y
608CONFIG_LEGACY_PTYS=y
609CONFIG_LEGACY_PTY_COUNT=256
610# CONFIG_IPMI_HANDLER is not set
611# CONFIG_HW_RANDOM is not set
612# CONFIG_NVRAM is not set
613# CONFIG_GEN_RTC is not set
614# CONFIG_R3964 is not set
615# CONFIG_APPLICOM is not set
616# CONFIG_RAW_DRIVER is not set
617# CONFIG_TCG_TPM is not set
618CONFIG_DEVPORT=y
619# CONFIG_I2C is not set
620# CONFIG_SPI is not set
621# CONFIG_W1 is not set
622# CONFIG_POWER_SUPPLY is not set
623# CONFIG_HWMON is not set
624# CONFIG_THERMAL is not set
625# CONFIG_THERMAL_HWMON is not set
626# CONFIG_WATCHDOG is not set
627
628#
629# Sonics Silicon Backplane
630#
631CONFIG_SSB_POSSIBLE=y
632# CONFIG_SSB is not set
633
634#
635# Multifunction device drivers
636#
637# CONFIG_MFD_SM501 is not set
638# CONFIG_HTC_PASIC3 is not set
639
640#
641# Multimedia devices
642#
643
644#
645# Multimedia core support
646#
647# CONFIG_VIDEO_DEV is not set
648# CONFIG_DVB_CORE is not set
649# CONFIG_VIDEO_MEDIA is not set
650
651#
652# Multimedia drivers
653#
654# CONFIG_DAB is not set
655
656#
657# Graphics support
658#
659# CONFIG_AGP is not set
660# CONFIG_DRM is not set
661# CONFIG_VGASTATE is not set
662CONFIG_VIDEO_OUTPUT_CONTROL=m
663# CONFIG_FB is not set
664# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
665
666#
667# Display device support
668#
669# CONFIG_DISPLAY_SUPPORT is not set
670
671#
672# Sound
673#
674# CONFIG_SOUND is not set
675# CONFIG_USB_SUPPORT is not set
676# CONFIG_MMC is not set
677# CONFIG_MEMSTICK is not set
678# CONFIG_NEW_LEDS is not set
679# CONFIG_ACCESSIBILITY is not set
680# CONFIG_INFINIBAND is not set
681# CONFIG_EDAC is not set
682# CONFIG_RTC_CLASS is not set
683# CONFIG_DMADEVICES is not set
684# CONFIG_UIO is not set
685
686#
687# File systems
688#
689CONFIG_EXT2_FS=y
690# CONFIG_EXT2_FS_XATTR is not set
691# CONFIG_EXT2_FS_XIP is not set
692# CONFIG_EXT3_FS is not set
693# CONFIG_EXT4DEV_FS is not set
694# CONFIG_REISERFS_FS is not set
695# CONFIG_JFS_FS is not set
696# CONFIG_FS_POSIX_ACL is not set
697# CONFIG_XFS_FS is not set
698# CONFIG_OCFS2_FS is not set
699CONFIG_DNOTIFY=y
700CONFIG_INOTIFY=y
701CONFIG_INOTIFY_USER=y
702# CONFIG_QUOTA is not set
703# CONFIG_AUTOFS_FS is not set
704# CONFIG_AUTOFS4_FS is not set
705# CONFIG_FUSE_FS is not set
706
707#
708# CD-ROM/DVD Filesystems
709#
710# CONFIG_ISO9660_FS is not set
711# CONFIG_UDF_FS is not set
712
713#
714# DOS/FAT/NT Filesystems
715#
716# CONFIG_MSDOS_FS is not set
717# CONFIG_VFAT_FS is not set
718# CONFIG_NTFS_FS is not set
719
720#
721# Pseudo filesystems
722#
723CONFIG_PROC_FS=y
724CONFIG_PROC_KCORE=y
725CONFIG_PROC_SYSCTL=y
726CONFIG_SYSFS=y
727CONFIG_TMPFS=y
728# CONFIG_TMPFS_POSIX_ACL is not set
729# CONFIG_HUGETLB_PAGE is not set
730# CONFIG_CONFIGFS_FS is not set
731
732#
733# Miscellaneous filesystems
734#
735# CONFIG_ADFS_FS is not set
736# CONFIG_AFFS_FS is not set
737# CONFIG_HFS_FS is not set
738# CONFIG_HFSPLUS_FS is not set
739# CONFIG_BEFS_FS is not set
740# CONFIG_BFS_FS is not set
741# CONFIG_EFS_FS is not set
742# CONFIG_YAFFS_FS is not set
743# CONFIG_JFFS2_FS is not set
744CONFIG_CRAMFS=y
745# CONFIG_VXFS_FS is not set
746# CONFIG_MINIX_FS is not set
747# CONFIG_HPFS_FS is not set
748# CONFIG_QNX4FS_FS is not set
749# CONFIG_ROMFS_FS is not set
750# CONFIG_SYSV_FS is not set
751# CONFIG_UFS_FS is not set
752CONFIG_NETWORK_FILESYSTEMS=y
753CONFIG_NFS_FS=y
754CONFIG_NFS_V3=y
755# CONFIG_NFS_V3_ACL is not set
756# CONFIG_NFS_V4 is not set
757# CONFIG_NFSD is not set
758CONFIG_ROOT_NFS=y
759CONFIG_LOCKD=y
760CONFIG_LOCKD_V4=y
761CONFIG_NFS_COMMON=y
762CONFIG_SUNRPC=y
763# CONFIG_SUNRPC_BIND34 is not set
764# CONFIG_RPCSEC_GSS_KRB5 is not set
765# CONFIG_RPCSEC_GSS_SPKM3 is not set
766# CONFIG_SMB_FS is not set
767# CONFIG_CIFS is not set
768# CONFIG_NCP_FS is not set
769# CONFIG_CODA_FS is not set
770# CONFIG_AFS_FS is not set
771
772#
773# Partition Types
774#
775# CONFIG_PARTITION_ADVANCED is not set
776CONFIG_MSDOS_PARTITION=y
777# CONFIG_NLS is not set
778# CONFIG_DLM is not set
779
780#
781# Library routines
782#
783CONFIG_BITREVERSE=y
784# CONFIG_GENERIC_FIND_FIRST_BIT is not set
785# CONFIG_CRC_CCITT is not set
786# CONFIG_CRC16 is not set
787# CONFIG_CRC_ITU_T is not set
788CONFIG_CRC32=y
789# CONFIG_CRC7 is not set
790# CONFIG_LIBCRC32C is not set
791CONFIG_ZLIB_INFLATE=y
792CONFIG_PLIST=y
793CONFIG_HAS_IOMEM=y
794CONFIG_HAS_IOPORT=y
795CONFIG_HAS_DMA=y
796CONFIG_HAVE_LMB=y
797
798#
799# Kernel hacking
800#
801# CONFIG_PRINTK_TIME is not set
802CONFIG_ENABLE_WARN_DEPRECATED=y
803CONFIG_ENABLE_MUST_CHECK=y
804CONFIG_FRAME_WARN=1024
805CONFIG_MAGIC_SYSRQ=y
806# CONFIG_UNUSED_SYMBOLS is not set
807CONFIG_DEBUG_FS=y
808# CONFIG_HEADERS_CHECK is not set
809CONFIG_DEBUG_KERNEL=y
810# CONFIG_DEBUG_SHIRQ is not set
811CONFIG_DETECT_SOFTLOCKUP=y
812CONFIG_SCHED_DEBUG=y
813# CONFIG_SCHEDSTATS is not set
814# CONFIG_TIMER_STATS is not set
815# CONFIG_DEBUG_OBJECTS is not set
816# CONFIG_SLUB_DEBUG_ON is not set
817# CONFIG_SLUB_STATS is not set
818# CONFIG_DEBUG_RT_MUTEXES is not set
819# CONFIG_RT_MUTEX_TESTER is not set
820# CONFIG_DEBUG_SPINLOCK is not set
821# CONFIG_DEBUG_MUTEXES is not set
822# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
823# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
824# CONFIG_DEBUG_KOBJECT is not set
825CONFIG_DEBUG_BUGVERBOSE=y
826# CONFIG_DEBUG_INFO is not set
827# CONFIG_DEBUG_VM is not set
828# CONFIG_DEBUG_WRITECOUNT is not set
829# CONFIG_DEBUG_LIST is not set
830# CONFIG_DEBUG_SG is not set
831# CONFIG_BOOT_PRINTK_DELAY is not set
832# CONFIG_RCU_TORTURE_TEST is not set
833# CONFIG_BACKTRACE_SELF_TEST is not set
834# CONFIG_FAULT_INJECTION is not set
835# CONFIG_SAMPLES is not set
836# CONFIG_DEBUG_STACKOVERFLOW is not set
837# CONFIG_DEBUG_STACK_USAGE is not set
838# CONFIG_DEBUG_PAGEALLOC is not set
839# CONFIG_DEBUGGER is not set
840# CONFIG_IRQSTACKS is not set
841# CONFIG_VIRQ_DEBUG is not set
842# CONFIG_BDI_SWITCH is not set
843# CONFIG_PPC_EARLY_DEBUG is not set
844
845#
846# Security options
847#
848# CONFIG_KEYS is not set
849# CONFIG_SECURITY is not set
850# CONFIG_SECURITY_FILE_CAPABILITIES is not set
851CONFIG_CRYPTO=y
852
853#
854# Crypto core or helper
855#
856CONFIG_CRYPTO_ALGAPI=y
857CONFIG_CRYPTO_BLKCIPHER=y
858CONFIG_CRYPTO_MANAGER=y
859# CONFIG_CRYPTO_GF128MUL is not set
860# CONFIG_CRYPTO_NULL is not set
861# CONFIG_CRYPTO_CRYPTD is not set
862# CONFIG_CRYPTO_AUTHENC is not set
863# CONFIG_CRYPTO_TEST is not set
864
865#
866# Authenticated Encryption with Associated Data
867#
868# CONFIG_CRYPTO_CCM is not set
869# CONFIG_CRYPTO_GCM is not set
870# CONFIG_CRYPTO_SEQIV is not set
871
872#
873# Block modes
874#
875CONFIG_CRYPTO_CBC=y
876# CONFIG_CRYPTO_CTR is not set
877# CONFIG_CRYPTO_CTS is not set
878CONFIG_CRYPTO_ECB=y
879# CONFIG_CRYPTO_LRW is not set
880CONFIG_CRYPTO_PCBC=y
881# CONFIG_CRYPTO_XTS is not set
882
883#
884# Hash modes
885#
886# CONFIG_CRYPTO_HMAC is not set
887# CONFIG_CRYPTO_XCBC is not set
888
889#
890# Digest
891#
892# CONFIG_CRYPTO_CRC32C is not set
893# CONFIG_CRYPTO_MD4 is not set
894CONFIG_CRYPTO_MD5=y
895# CONFIG_CRYPTO_MICHAEL_MIC is not set
896# CONFIG_CRYPTO_SHA1 is not set
897# CONFIG_CRYPTO_SHA256 is not set
898# CONFIG_CRYPTO_SHA512 is not set
899# CONFIG_CRYPTO_TGR192 is not set
900# CONFIG_CRYPTO_WP512 is not set
901
902#
903# Ciphers
904#
905# CONFIG_CRYPTO_AES is not set
906# CONFIG_CRYPTO_ANUBIS is not set
907# CONFIG_CRYPTO_ARC4 is not set
908# CONFIG_CRYPTO_BLOWFISH is not set
909# CONFIG_CRYPTO_CAMELLIA is not set
910# CONFIG_CRYPTO_CAST5 is not set
911# CONFIG_CRYPTO_CAST6 is not set
912CONFIG_CRYPTO_DES=y
913# CONFIG_CRYPTO_FCRYPT is not set
914# CONFIG_CRYPTO_KHAZAD is not set
915# CONFIG_CRYPTO_SALSA20 is not set
916# CONFIG_CRYPTO_SEED is not set
917# CONFIG_CRYPTO_SERPENT is not set
918# CONFIG_CRYPTO_TEA is not set
919# CONFIG_CRYPTO_TWOFISH is not set
920
921#
922# Compression
923#
924# CONFIG_CRYPTO_DEFLATE is not set
925# CONFIG_CRYPTO_LZO is not set
926CONFIG_CRYPTO_HW=y
927# CONFIG_CRYPTO_DEV_HIFN_795X is not set
928# CONFIG_PPC_CLOCK is not set
929# CONFIG_VIRTUALIZATION is not set
diff --git a/arch/powerpc/include/asm/kdump.h b/arch/powerpc/include/asm/kdump.h
index f6c93c716898..a503da9d56f3 100644
--- a/arch/powerpc/include/asm/kdump.h
+++ b/arch/powerpc/include/asm/kdump.h
@@ -9,6 +9,12 @@
9 * Reserve to the end of the FWNMI area, see head_64.S */ 9 * Reserve to the end of the FWNMI area, see head_64.S */
10#define KDUMP_RESERVE_LIMIT 0x10000 /* 64K */ 10#define KDUMP_RESERVE_LIMIT 0x10000 /* 64K */
11 11
12/*
13 * Used to differentiate between relocatable kdump kernel and other
14 * kernels
15 */
16#define KDUMP_SIGNATURE 0xfeed1234
17
12#ifdef CONFIG_CRASH_DUMP 18#ifdef CONFIG_CRASH_DUMP
13 19
14#define KDUMP_TRAMPOLINE_START 0x0100 20#define KDUMP_TRAMPOLINE_START 0x0100
@@ -19,17 +25,18 @@
19#endif /* CONFIG_CRASH_DUMP */ 25#endif /* CONFIG_CRASH_DUMP */
20 26
21#ifndef __ASSEMBLY__ 27#ifndef __ASSEMBLY__
22#ifdef CONFIG_CRASH_DUMP
23 28
29extern unsigned long __kdump_flag;
30
31#if defined(CONFIG_CRASH_DUMP) && !defined(CONFIG_RELOCATABLE)
24extern void reserve_kdump_trampoline(void); 32extern void reserve_kdump_trampoline(void);
25extern void setup_kdump_trampoline(void); 33extern void setup_kdump_trampoline(void);
26 34#else
27#else /* !CONFIG_CRASH_DUMP */ 35/* !CRASH_DUMP || RELOCATABLE */
28
29static inline void reserve_kdump_trampoline(void) { ; } 36static inline void reserve_kdump_trampoline(void) { ; }
30static inline void setup_kdump_trampoline(void) { ; } 37static inline void setup_kdump_trampoline(void) { ; }
38#endif
31 39
32#endif /* CONFIG_CRASH_DUMP */
33#endif /* __ASSEMBLY__ */ 40#endif /* __ASSEMBLY__ */
34 41
35#endif /* __PPC64_KDUMP_H */ 42#endif /* __PPC64_KDUMP_H */
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index 5ac51e6efc1d..c0b8d4a29a91 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -77,6 +77,7 @@
77 77
78#if defined(CONFIG_RELOCATABLE) 78#if defined(CONFIG_RELOCATABLE)
79#ifndef __ASSEMBLY__ 79#ifndef __ASSEMBLY__
80
80extern phys_addr_t memstart_addr; 81extern phys_addr_t memstart_addr;
81extern phys_addr_t kernstart_addr; 82extern phys_addr_t kernstart_addr;
82#endif 83#endif
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index e70d0483fb4e..b1eb834bc0fc 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -1277,6 +1277,19 @@ static struct cpu_spec __initdata cpu_specs[] = {
1277 .machine_check = machine_check_4xx, 1277 .machine_check = machine_check_4xx,
1278 .platform = "ppc405", 1278 .platform = "ppc405",
1279 }, 1279 },
1280 {
1281 /* 405EZ */
1282 .pvr_mask = 0xffff0000,
1283 .pvr_value = 0x41510000,
1284 .cpu_name = "405EZ",
1285 .cpu_features = CPU_FTRS_40X,
1286 .cpu_user_features = PPC_FEATURE_32 |
1287 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
1288 .icache_bsize = 32,
1289 .dcache_bsize = 32,
1290 .machine_check = machine_check_4xx,
1291 .platform = "ppc405",
1292 },
1280 { /* default match */ 1293 { /* default match */
1281 .pvr_mask = 0x00000000, 1294 .pvr_mask = 0x00000000,
1282 .pvr_value = 0x00000000, 1295 .pvr_value = 0x00000000,
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index 97e056379728..19671aca6591 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -30,6 +30,7 @@
30/* Stores the physical address of elf header of crash image. */ 30/* Stores the physical address of elf header of crash image. */
31unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX; 31unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX;
32 32
33#ifndef CONFIG_RELOCATABLE
33void __init reserve_kdump_trampoline(void) 34void __init reserve_kdump_trampoline(void)
34{ 35{
35 lmb_reserve(0, KDUMP_RESERVE_LIMIT); 36 lmb_reserve(0, KDUMP_RESERVE_LIMIT);
@@ -68,6 +69,7 @@ void __init setup_kdump_trampoline(void)
68 69
69 DBG(" <- setup_kdump_trampoline()\n"); 70 DBG(" <- setup_kdump_trampoline()\n");
70} 71}
72#endif /* CONFIG_RELOCATABLE */
71 73
72/* 74/*
73 * Note: elfcorehdr_addr is not just limited to vmcore. It is also used by 75 * Note: elfcorehdr_addr is not just limited to vmcore. It is also used by
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 84856bee33a5..69489bd3210c 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -97,6 +97,12 @@ __secondary_hold_spinloop:
97__secondary_hold_acknowledge: 97__secondary_hold_acknowledge:
98 .llong 0x0 98 .llong 0x0
99 99
100 /* This flag is set by purgatory if we should be a kdump kernel. */
101 /* Do not move this variable as purgatory knows about it. */
102 .globl __kdump_flag
103__kdump_flag:
104 .llong 0x0
105
100#ifdef CONFIG_PPC_ISERIES 106#ifdef CONFIG_PPC_ISERIES
101 /* 107 /*
102 * At offset 0x20, there is a pointer to iSeries LPAR data. 108 * At offset 0x20, there is a pointer to iSeries LPAR data.
@@ -1384,7 +1390,13 @@ _STATIC(__after_prom_start)
1384 /* process relocations for the final address of the kernel */ 1390 /* process relocations for the final address of the kernel */
1385 lis r25,PAGE_OFFSET@highest /* compute virtual base of kernel */ 1391 lis r25,PAGE_OFFSET@highest /* compute virtual base of kernel */
1386 sldi r25,r25,32 1392 sldi r25,r25,32
1387 mr r3,r25 1393#ifdef CONFIG_CRASH_DUMP
1394 ld r7,__kdump_flag-_stext(r26)
1395 cmpldi cr0,r7,1 /* kdump kernel ? - stay where we are */
1396 bne 1f
1397 add r25,r25,r26
1398#endif
13991: mr r3,r25
1388 bl .relocate 1400 bl .relocate
1389#endif 1401#endif
1390 1402
@@ -1398,11 +1410,26 @@ _STATIC(__after_prom_start)
1398 li r3,0 /* target addr */ 1410 li r3,0 /* target addr */
1399 mr. r4,r26 /* In some cases the loader may */ 1411 mr. r4,r26 /* In some cases the loader may */
1400 beq 9f /* have already put us at zero */ 1412 beq 9f /* have already put us at zero */
1401 lis r5,(copy_to_here - _stext)@ha
1402 addi r5,r5,(copy_to_here - _stext)@l /* # bytes of memory to copy */
1403 li r6,0x100 /* Start offset, the first 0x100 */ 1413 li r6,0x100 /* Start offset, the first 0x100 */
1404 /* bytes were copied earlier. */ 1414 /* bytes were copied earlier. */
1405 1415
1416#ifdef CONFIG_CRASH_DUMP
1417/*
1418 * Check if the kernel has to be running as relocatable kernel based on the
1419 * variable __kdump_flag, if it is set the kernel is treated as relocatable
1420 * kernel, otherwise it will be moved to PHYSICAL_START
1421 */
1422 ld r7,__kdump_flag-_stext(r26)
1423 cmpldi cr0,r7,1
1424 bne 3f
1425
1426 li r5,__end_interrupts - _stext /* just copy interrupts */
1427 b 5f
14283:
1429#endif
1430 lis r5,(copy_to_here - _stext)@ha
1431 addi r5,r5,(copy_to_here - _stext)@l /* # bytes of memory to copy */
1432
1406 bl .copy_and_flush /* copy the first n bytes */ 1433 bl .copy_and_flush /* copy the first n bytes */
1407 /* this includes the code being */ 1434 /* this includes the code being */
1408 /* executed here. */ 1435 /* executed here. */
@@ -1411,15 +1438,15 @@ _STATIC(__after_prom_start)
1411 mtctr r8 1438 mtctr r8
1412 bctr 1439 bctr
1413 1440
1441p_end: .llong _end - _stext
1442
14144: /* Now copy the rest of the kernel up to _end */ 14434: /* Now copy the rest of the kernel up to _end */
1415 addis r5,r26,(p_end - _stext)@ha 1444 addis r5,r26,(p_end - _stext)@ha
1416 ld r5,(p_end - _stext)@l(r5) /* get _end */ 1445 ld r5,(p_end - _stext)@l(r5) /* get _end */
1417 bl .copy_and_flush /* copy the rest */ 14465: bl .copy_and_flush /* copy the rest */
1418 1447
14199: b .start_here_multiplatform 14489: b .start_here_multiplatform
1420 1449
1421p_end: .llong _end - _stext
1422
1423/* 1450/*
1424 * Copy routine used to copy the kernel to start at physical address 0 1451 * Copy routine used to copy the kernel to start at physical address 0
1425 * and flush and invalidate the caches as needed. 1452 * and flush and invalidate the caches as needed.
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index ea1ba89f9c90..3857d7e2af0c 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -458,6 +458,42 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
458 spin_unlock_irqrestore(&(tbl->it_lock), flags); 458 spin_unlock_irqrestore(&(tbl->it_lock), flags);
459} 459}
460 460
461static void iommu_table_clear(struct iommu_table *tbl)
462{
463 if (!__kdump_flag) {
464 /* Clear the table in case firmware left allocations in it */
465 ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
466 return;
467 }
468
469#ifdef CONFIG_CRASH_DUMP
470 if (ppc_md.tce_get) {
471 unsigned long index, tceval, tcecount = 0;
472
473 /* Reserve the existing mappings left by the first kernel. */
474 for (index = 0; index < tbl->it_size; index++) {
475 tceval = ppc_md.tce_get(tbl, index + tbl->it_offset);
476 /*
477 * Freed TCE entry contains 0x7fffffffffffffff on JS20
478 */
479 if (tceval && (tceval != 0x7fffffffffffffffUL)) {
480 __set_bit(index, tbl->it_map);
481 tcecount++;
482 }
483 }
484
485 if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
486 printk(KERN_WARNING "TCE table is full; freeing ");
487 printk(KERN_WARNING "%d entries for the kdump boot\n",
488 KDUMP_MIN_TCE_ENTRIES);
489 for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
490 index < tbl->it_size; index++)
491 __clear_bit(index, tbl->it_map);
492 }
493 }
494#endif
495}
496
461/* 497/*
462 * Build a iommu_table structure. This contains a bit map which 498 * Build a iommu_table structure. This contains a bit map which
463 * is used to manage allocation of the tce space. 499 * is used to manage allocation of the tce space.
@@ -484,38 +520,7 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
484 tbl->it_largehint = tbl->it_halfpoint; 520 tbl->it_largehint = tbl->it_halfpoint;
485 spin_lock_init(&tbl->it_lock); 521 spin_lock_init(&tbl->it_lock);
486 522
487#ifdef CONFIG_CRASH_DUMP 523 iommu_table_clear(tbl);
488 if (ppc_md.tce_get) {
489 unsigned long index;
490 unsigned long tceval;
491 unsigned long tcecount = 0;
492
493 /*
494 * Reserve the existing mappings left by the first kernel.
495 */
496 for (index = 0; index < tbl->it_size; index++) {
497 tceval = ppc_md.tce_get(tbl, index + tbl->it_offset);
498 /*
499 * Freed TCE entry contains 0x7fffffffffffffff on JS20
500 */
501 if (tceval && (tceval != 0x7fffffffffffffffUL)) {
502 __set_bit(index, tbl->it_map);
503 tcecount++;
504 }
505 }
506 if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
507 printk(KERN_WARNING "TCE table is full; ");
508 printk(KERN_WARNING "freeing %d entries for the kdump boot\n",
509 KDUMP_MIN_TCE_ENTRIES);
510 for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
511 index < tbl->it_size; index++)
512 __clear_bit(index, tbl->it_map);
513 }
514 }
515#else
516 /* Clear the hardware table in case firmware left allocations in it */
517 ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
518#endif
519 524
520 if (!welcomed) { 525 if (!welcomed) {
521 printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n", 526 printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index aab76887a842..ac2a21f45c75 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -88,11 +88,13 @@ void __init reserve_crashkernel(void)
88 88
89 crash_size = crashk_res.end - crashk_res.start + 1; 89 crash_size = crashk_res.end - crashk_res.start + 1;
90 90
91#ifndef CONFIG_RELOCATABLE
91 if (crashk_res.start != KDUMP_KERNELBASE) 92 if (crashk_res.start != KDUMP_KERNELBASE)
92 printk("Crash kernel location must be 0x%x\n", 93 printk("Crash kernel location must be 0x%x\n",
93 KDUMP_KERNELBASE); 94 KDUMP_KERNELBASE);
94 95
95 crashk_res.start = KDUMP_KERNELBASE; 96 crashk_res.start = KDUMP_KERNELBASE;
97#endif
96 crash_size = PAGE_ALIGN(crash_size); 98 crash_size = PAGE_ALIGN(crash_size);
97 crashk_res.end = crashk_res.start + crash_size - 1; 99 crashk_res.end = crashk_res.start + crash_size - 1;
98 100
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index a168514d8609..e6efec788c4d 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -255,11 +255,14 @@ static union thread_union kexec_stack
255/* Our assembly helper, in kexec_stub.S */ 255/* Our assembly helper, in kexec_stub.S */
256extern NORET_TYPE void kexec_sequence(void *newstack, unsigned long start, 256extern NORET_TYPE void kexec_sequence(void *newstack, unsigned long start,
257 void *image, void *control, 257 void *image, void *control,
258 void (*clear_all)(void)) ATTRIB_NORET; 258 void (*clear_all)(void),
259 unsigned long kdump_flag) ATTRIB_NORET;
259 260
260/* too late to fail here */ 261/* too late to fail here */
261void default_machine_kexec(struct kimage *image) 262void default_machine_kexec(struct kimage *image)
262{ 263{
264 unsigned long kdump_flag = 0;
265
263 /* prepare control code if any */ 266 /* prepare control code if any */
264 267
265 /* 268 /*
@@ -270,8 +273,10 @@ void default_machine_kexec(struct kimage *image)
270 * using debugger IPI. 273 * using debugger IPI.
271 */ 274 */
272 275
273 if (crashing_cpu == -1) 276 if (crashing_cpu == -1)
274 kexec_prepare_cpus(); 277 kexec_prepare_cpus();
278 else
279 kdump_flag = KDUMP_SIGNATURE;
275 280
276 /* switch to a staticly allocated stack. Based on irq stack code. 281 /* switch to a staticly allocated stack. Based on irq stack code.
277 * XXX: the task struct will likely be invalid once we do the copy! 282 * XXX: the task struct will likely be invalid once we do the copy!
@@ -284,7 +289,7 @@ void default_machine_kexec(struct kimage *image)
284 */ 289 */
285 kexec_sequence(&kexec_stack, image->start, image, 290 kexec_sequence(&kexec_stack, image->start, image,
286 page_address(image->control_code_page), 291 page_address(image->control_code_page),
287 ppc_md.hpte_clear_all); 292 ppc_md.hpte_clear_all, kdump_flag);
288 /* NOTREACHED */ 293 /* NOTREACHED */
289} 294}
290 295
@@ -312,11 +317,24 @@ static struct property kernel_end_prop = {
312static void __init export_htab_values(void) 317static void __init export_htab_values(void)
313{ 318{
314 struct device_node *node; 319 struct device_node *node;
320 struct property *prop;
315 321
316 node = of_find_node_by_path("/chosen"); 322 node = of_find_node_by_path("/chosen");
317 if (!node) 323 if (!node)
318 return; 324 return;
319 325
326 /* remove any stale propertys so ours can be found */
327 prop = of_find_property(node, kernel_end_prop.name, NULL);
328 if (prop)
329 prom_remove_property(node, prop);
330 prop = of_find_property(node, htab_base_prop.name, NULL);
331 if (prop)
332 prom_remove_property(node, prop);
333 prop = of_find_property(node, htab_size_prop.name, NULL);
334 if (prop)
335 prom_remove_property(node, prop);
336
337 /* information needed by userspace when using default_machine_kexec */
320 kernel_end = __pa(_end); 338 kernel_end = __pa(_end);
321 prom_add_property(node, &kernel_end_prop); 339 prom_add_property(node, &kernel_end_prop);
322 340
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 3053fe5c62f2..a243fd072a77 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -611,10 +611,12 @@ real_mode: /* assume normal blr return */
611 611
612 612
613/* 613/*
614 * kexec_sequence(newstack, start, image, control, clear_all()) 614 * kexec_sequence(newstack, start, image, control, clear_all(), kdump_flag)
615 * 615 *
616 * does the grungy work with stack switching and real mode switches 616 * does the grungy work with stack switching and real mode switches
617 * also does simple calls to other code 617 * also does simple calls to other code
618 *
619 * kdump_flag says whether the next kernel should be a kdump kernel.
618 */ 620 */
619 621
620_GLOBAL(kexec_sequence) 622_GLOBAL(kexec_sequence)
@@ -647,7 +649,7 @@ _GLOBAL(kexec_sequence)
647 mr r29,r5 /* image (virt) */ 649 mr r29,r5 /* image (virt) */
648 mr r28,r6 /* control, unused */ 650 mr r28,r6 /* control, unused */
649 mr r27,r7 /* clear_all() fn desc */ 651 mr r27,r7 /* clear_all() fn desc */
650 mr r26,r8 /* spare */ 652 mr r26,r8 /* kdump flag */
651 lhz r25,PACAHWCPUID(r13) /* get our phys cpu from paca */ 653 lhz r25,PACAHWCPUID(r13) /* get our phys cpu from paca */
652 654
653 /* disable interrupts, we are overwriting kernel data next */ 655 /* disable interrupts, we are overwriting kernel data next */
@@ -709,5 +711,6 @@ _GLOBAL(kexec_sequence)
709 mr r4,r30 # start, aka phys mem offset 711 mr r4,r30 # start, aka phys mem offset
710 mtlr 4 712 mtlr 4
711 li r5,0 713 li r5,0
712 blr /* image->start(physid, image->start, 0); */ 714 mr r6,r26 /* kdump_flag */
715 blr /* image->start(physid, image->start, 0, kdump_flag); */
713#endif /* CONFIG_KEXEC */ 716#endif /* CONFIG_KEXEC */
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 3815d84a1ef4..1ec73938a00f 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -610,7 +610,8 @@ int pci_mmap_legacy_page_range(struct pci_bus *bus,
610 pr_debug(" -> mapping phys %llx\n", (unsigned long long)offset); 610 pr_debug(" -> mapping phys %llx\n", (unsigned long long)offset);
611 611
612 vma->vm_pgoff = offset >> PAGE_SHIFT; 612 vma->vm_pgoff = offset >> PAGE_SHIFT;
613 vma->vm_page_prot |= _PAGE_NO_CACHE | _PAGE_GUARDED; 613 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
614 | _PAGE_NO_CACHE | _PAGE_GUARDED);
614 return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, 615 return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
615 vma->vm_end - vma->vm_start, 616 vma->vm_end - vma->vm_start,
616 vma->vm_page_prot); 617 vma->vm_page_prot);
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 2fdbc18ae94a..23e0db203329 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -487,67 +487,6 @@ static int __init prom_setprop(phandle node, const char *nodename,
487 return call_prom("interpret", 1, 1, (u32)(unsigned long) cmd); 487 return call_prom("interpret", 1, 1, (u32)(unsigned long) cmd);
488} 488}
489 489
490/* We can't use the standard versions because of RELOC headaches. */
491#define isxdigit(c) (('0' <= (c) && (c) <= '9') \
492 || ('a' <= (c) && (c) <= 'f') \
493 || ('A' <= (c) && (c) <= 'F'))
494
495#define isdigit(c) ('0' <= (c) && (c) <= '9')
496#define islower(c) ('a' <= (c) && (c) <= 'z')
497#define toupper(c) (islower(c) ? ((c) - 'a' + 'A') : (c))
498
499unsigned long prom_strtoul(const char *cp, const char **endp)
500{
501 unsigned long result = 0, base = 10, value;
502
503 if (*cp == '0') {
504 base = 8;
505 cp++;
506 if (toupper(*cp) == 'X') {
507 cp++;
508 base = 16;
509 }
510 }
511
512 while (isxdigit(*cp) &&
513 (value = isdigit(*cp) ? *cp - '0' : toupper(*cp) - 'A' + 10) < base) {
514 result = result * base + value;
515 cp++;
516 }
517
518 if (endp)
519 *endp = cp;
520
521 return result;
522}
523
524unsigned long prom_memparse(const char *ptr, const char **retptr)
525{
526 unsigned long ret = prom_strtoul(ptr, retptr);
527 int shift = 0;
528
529 /*
530 * We can't use a switch here because GCC *may* generate a
531 * jump table which won't work, because we're not running at
532 * the address we're linked at.
533 */
534 if ('G' == **retptr || 'g' == **retptr)
535 shift = 30;
536
537 if ('M' == **retptr || 'm' == **retptr)
538 shift = 20;
539
540 if ('K' == **retptr || 'k' == **retptr)
541 shift = 10;
542
543 if (shift) {
544 ret <<= shift;
545 (*retptr)++;
546 }
547
548 return ret;
549}
550
551/* 490/*
552 * Early parsing of the command line passed to the kernel, used for 491 * Early parsing of the command line passed to the kernel, used for
553 * "mem=x" and the options that affect the iommu 492 * "mem=x" and the options that affect the iommu
diff --git a/arch/powerpc/kernel/prom_init_check.sh b/arch/powerpc/kernel/prom_init_check.sh
index 2c7e8e87f770..ea3a2ec03ffa 100644
--- a/arch/powerpc/kernel/prom_init_check.sh
+++ b/arch/powerpc/kernel/prom_init_check.sh
@@ -20,7 +20,7 @@ WHITELIST="add_reloc_offset __bss_start __bss_stop copy_and_flush
20_end enter_prom memcpy memset reloc_offset __secondary_hold 20_end enter_prom memcpy memset reloc_offset __secondary_hold
21__secondary_hold_acknowledge __secondary_hold_spinloop __start 21__secondary_hold_acknowledge __secondary_hold_spinloop __start
22strcmp strcpy strlcpy strlen strncmp strstr logo_linux_clut224 22strcmp strcpy strlcpy strlen strncmp strstr logo_linux_clut224
23reloc_got2 kernstart_addr" 23reloc_got2 kernstart_addr memstart_addr"
24 24
25NM="$1" 25NM="$1"
26OBJ="$2" 26OBJ="$2"
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 5ec56ff03e86..705fc4bf3800 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -59,6 +59,7 @@
59#include <asm/mmu.h> 59#include <asm/mmu.h>
60#include <asm/xmon.h> 60#include <asm/xmon.h>
61#include <asm/cputhreads.h> 61#include <asm/cputhreads.h>
62#include <mm/mmu_decl.h>
62 63
63#include "setup.h" 64#include "setup.h"
64 65
@@ -190,6 +191,12 @@ static int show_cpuinfo(struct seq_file *m, void *v)
190 if (ppc_md.show_cpuinfo != NULL) 191 if (ppc_md.show_cpuinfo != NULL)
191 ppc_md.show_cpuinfo(m); 192 ppc_md.show_cpuinfo(m);
192 193
194#ifdef CONFIG_PPC32
195 /* Display the amount of memory */
196 seq_printf(m, "Memory\t\t: %d MB\n",
197 (unsigned int)(total_memory / (1024 * 1024)));
198#endif
199
193 return 0; 200 return 0;
194 } 201 }
195 202
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 65ad925c3a8f..c6a8f2326b6f 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -235,8 +235,6 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
235 else 235 else
236 for (i = 0; i < 32 ; i++) 236 for (i = 0; i < 32 ; i++)
237 current->thread.fpr[i][TS_VSRLOWOFFSET] = 0; 237 current->thread.fpr[i][TS_VSRLOWOFFSET] = 0;
238
239#else
240#endif 238#endif
241 return err; 239 return err;
242} 240}
diff --git a/arch/powerpc/kernel/udbg_16550.c b/arch/powerpc/kernel/udbg_16550.c
index cb01ebc59387..7b7da8cfd5e8 100644
--- a/arch/powerpc/kernel/udbg_16550.c
+++ b/arch/powerpc/kernel/udbg_16550.c
@@ -142,7 +142,7 @@ unsigned int udbg_probe_uart_speed(void __iomem *comport, unsigned int clock)
142 speed = (clock / prescaler) / (divisor * 16); 142 speed = (clock / prescaler) / (divisor * 16);
143 143
144 /* sanity check */ 144 /* sanity check */
145 if (speed < 0 || speed > (clock / 16)) 145 if (speed > (clock / 16))
146 speed = 9600; 146 speed = 9600;
147 147
148 return speed; 148 return speed;
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 5c64af174752..8d5b4758c13a 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -382,8 +382,10 @@ static int __init htab_dt_scan_hugepage_blocks(unsigned long node,
382 printk(KERN_INFO "Huge page(16GB) memory: " 382 printk(KERN_INFO "Huge page(16GB) memory: "
383 "addr = 0x%lX size = 0x%lX pages = %d\n", 383 "addr = 0x%lX size = 0x%lX pages = %d\n",
384 phys_addr, block_size, expected_pages); 384 phys_addr, block_size, expected_pages);
385 lmb_reserve(phys_addr, block_size * expected_pages); 385 if (phys_addr + (16 * GB) <= lmb_end_of_DRAM()) {
386 add_gpage(phys_addr, block_size, expected_pages); 386 lmb_reserve(phys_addr, block_size * expected_pages);
387 add_gpage(phys_addr, block_size, expected_pages);
388 }
387 return 0; 389 return 0;
388} 390}
389#endif /* CONFIG_HUGETLB_PAGE */ 391#endif /* CONFIG_HUGETLB_PAGE */
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 6cf5c71c431f..eb505ad34a85 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -116,6 +116,7 @@ static int __init get_active_region_work_fn(unsigned long start_pfn,
116 116
117/* 117/*
118 * get_node_active_region - Return active region containing start_pfn 118 * get_node_active_region - Return active region containing start_pfn
119 * Active range returned is empty if none found.
119 * @start_pfn: The page to return the region for. 120 * @start_pfn: The page to return the region for.
120 * @node_ar: Returned set to the active region containing start_pfn 121 * @node_ar: Returned set to the active region containing start_pfn
121 */ 122 */
@@ -126,6 +127,7 @@ static void __init get_node_active_region(unsigned long start_pfn,
126 127
127 node_ar->nid = nid; 128 node_ar->nid = nid;
128 node_ar->start_pfn = start_pfn; 129 node_ar->start_pfn = start_pfn;
130 node_ar->end_pfn = start_pfn;
129 work_with_active_regions(nid, get_active_region_work_fn, node_ar); 131 work_with_active_regions(nid, get_active_region_work_fn, node_ar);
130} 132}
131 133
@@ -526,12 +528,10 @@ static unsigned long __init numa_enforce_memory_limit(unsigned long start,
526 /* 528 /*
527 * We use lmb_end_of_DRAM() in here instead of memory_limit because 529 * We use lmb_end_of_DRAM() in here instead of memory_limit because
528 * we've already adjusted it for the limit and it takes care of 530 * we've already adjusted it for the limit and it takes care of
529 * having memory holes below the limit. 531 * having memory holes below the limit. Also, in the case of
532 * iommu_is_off, memory_limit is not set but is implicitly enforced.
530 */ 533 */
531 534
532 if (! memory_limit)
533 return size;
534
535 if (start + size <= lmb_end_of_DRAM()) 535 if (start + size <= lmb_end_of_DRAM())
536 return size; 536 return size;
537 537
@@ -933,18 +933,20 @@ void __init do_init_bootmem(void)
933 struct node_active_region node_ar; 933 struct node_active_region node_ar;
934 934
935 get_node_active_region(start_pfn, &node_ar); 935 get_node_active_region(start_pfn, &node_ar);
936 while (start_pfn < end_pfn) { 936 while (start_pfn < end_pfn &&
937 node_ar.start_pfn < node_ar.end_pfn) {
938 unsigned long reserve_size = size;
937 /* 939 /*
938 * if reserved region extends past active region 940 * if reserved region extends past active region
939 * then trim size to active region 941 * then trim size to active region
940 */ 942 */
941 if (end_pfn > node_ar.end_pfn) 943 if (end_pfn > node_ar.end_pfn)
942 size = (node_ar.end_pfn << PAGE_SHIFT) 944 reserve_size = (node_ar.end_pfn << PAGE_SHIFT)
943 - (start_pfn << PAGE_SHIFT); 945 - (start_pfn << PAGE_SHIFT);
944 dbg("reserve_bootmem %lx %lx nid=%d\n", physbase, size, 946 dbg("reserve_bootmem %lx %lx nid=%d\n", physbase,
945 node_ar.nid); 947 reserve_size, node_ar.nid);
946 reserve_bootmem_node(NODE_DATA(node_ar.nid), physbase, 948 reserve_bootmem_node(NODE_DATA(node_ar.nid), physbase,
947 size, BOOTMEM_DEFAULT); 949 reserve_size, BOOTMEM_DEFAULT);
948 /* 950 /*
949 * if reserved region is contained in the active region 951 * if reserved region is contained in the active region
950 * then done. 952 * then done.
@@ -959,6 +961,7 @@ void __init do_init_bootmem(void)
959 */ 961 */
960 start_pfn = node_ar.end_pfn; 962 start_pfn = node_ar.end_pfn;
961 physbase = start_pfn << PAGE_SHIFT; 963 physbase = start_pfn << PAGE_SHIFT;
964 size = size - reserve_size;
962 get_node_active_region(start_pfn, &node_ar); 965 get_node_active_region(start_pfn, &node_ar);
963 } 966 }
964 967
diff --git a/arch/powerpc/oprofile/cell/pr_util.h b/arch/powerpc/oprofile/cell/pr_util.h
index 22e4e8d4eb2c..628009c01958 100644
--- a/arch/powerpc/oprofile/cell/pr_util.h
+++ b/arch/powerpc/oprofile/cell/pr_util.h
@@ -24,6 +24,11 @@
24#define SKIP_GENERIC_SYNC 0 24#define SKIP_GENERIC_SYNC 0
25#define SYNC_START_ERROR -1 25#define SYNC_START_ERROR -1
26#define DO_GENERIC_SYNC 1 26#define DO_GENERIC_SYNC 1
27#define SPUS_PER_NODE 8
28#define DEFAULT_TIMER_EXPIRE (HZ / 10)
29
30extern struct delayed_work spu_work;
31extern int spu_prof_running;
27 32
28struct spu_overlay_info { /* map of sections within an SPU overlay */ 33struct spu_overlay_info { /* map of sections within an SPU overlay */
29 unsigned int vma; /* SPU virtual memory address from elf */ 34 unsigned int vma; /* SPU virtual memory address from elf */
@@ -62,6 +67,14 @@ struct vma_to_fileoffset_map { /* map of sections within an SPU program */
62 67
63}; 68};
64 69
70struct spu_buffer {
71 int last_guard_val;
72 int ctx_sw_seen;
73 unsigned long *buff;
74 unsigned int head, tail;
75};
76
77
65/* The three functions below are for maintaining and accessing 78/* The three functions below are for maintaining and accessing
66 * the vma-to-fileoffset map. 79 * the vma-to-fileoffset map.
67 */ 80 */
diff --git a/arch/powerpc/oprofile/cell/spu_profiler.c b/arch/powerpc/oprofile/cell/spu_profiler.c
index 380d7e217531..6edaebd5099a 100644
--- a/arch/powerpc/oprofile/cell/spu_profiler.c
+++ b/arch/powerpc/oprofile/cell/spu_profiler.c
@@ -23,12 +23,11 @@
23 23
24static u32 *samples; 24static u32 *samples;
25 25
26static int spu_prof_running; 26int spu_prof_running;
27static unsigned int profiling_interval; 27static unsigned int profiling_interval;
28 28
29#define NUM_SPU_BITS_TRBUF 16 29#define NUM_SPU_BITS_TRBUF 16
30#define SPUS_PER_TB_ENTRY 4 30#define SPUS_PER_TB_ENTRY 4
31#define SPUS_PER_NODE 8
32 31
33#define SPU_PC_MASK 0xFFFF 32#define SPU_PC_MASK 0xFFFF
34 33
@@ -208,6 +207,7 @@ int start_spu_profiling(unsigned int cycles_reset)
208 207
209 spu_prof_running = 1; 208 spu_prof_running = 1;
210 hrtimer_start(&timer, kt, HRTIMER_MODE_REL); 209 hrtimer_start(&timer, kt, HRTIMER_MODE_REL);
210 schedule_delayed_work(&spu_work, DEFAULT_TIMER_EXPIRE);
211 211
212 return 0; 212 return 0;
213} 213}
diff --git a/arch/powerpc/oprofile/cell/spu_task_sync.c b/arch/powerpc/oprofile/cell/spu_task_sync.c
index 2a9b4a049329..2949126d28d1 100644
--- a/arch/powerpc/oprofile/cell/spu_task_sync.c
+++ b/arch/powerpc/oprofile/cell/spu_task_sync.c
@@ -35,7 +35,102 @@ static DEFINE_SPINLOCK(buffer_lock);
35static DEFINE_SPINLOCK(cache_lock); 35static DEFINE_SPINLOCK(cache_lock);
36static int num_spu_nodes; 36static int num_spu_nodes;
37int spu_prof_num_nodes; 37int spu_prof_num_nodes;
38int last_guard_val[MAX_NUMNODES * 8]; 38
39struct spu_buffer spu_buff[MAX_NUMNODES * SPUS_PER_NODE];
40struct delayed_work spu_work;
41static unsigned max_spu_buff;
42
43static void spu_buff_add(unsigned long int value, int spu)
44{
45 /* spu buff is a circular buffer. Add entries to the
46 * head. Head is the index to store the next value.
47 * The buffer is full when there is one available entry
48 * in the queue, i.e. head and tail can't be equal.
49 * That way we can tell the difference between the
50 * buffer being full versus empty.
51 *
52 * ASSUPTION: the buffer_lock is held when this function
53 * is called to lock the buffer, head and tail.
54 */
55 int full = 1;
56
57 if (spu_buff[spu].head >= spu_buff[spu].tail) {
58 if ((spu_buff[spu].head - spu_buff[spu].tail)
59 < (max_spu_buff - 1))
60 full = 0;
61
62 } else if (spu_buff[spu].tail > spu_buff[spu].head) {
63 if ((spu_buff[spu].tail - spu_buff[spu].head)
64 > 1)
65 full = 0;
66 }
67
68 if (!full) {
69 spu_buff[spu].buff[spu_buff[spu].head] = value;
70 spu_buff[spu].head++;
71
72 if (spu_buff[spu].head >= max_spu_buff)
73 spu_buff[spu].head = 0;
74 } else {
75 /* From the user's perspective make the SPU buffer
76 * size management/overflow look like we are using
77 * per cpu buffers. The user uses the same
78 * per cpu parameter to adjust the SPU buffer size.
79 * Increment the sample_lost_overflow to inform
80 * the user the buffer size needs to be increased.
81 */
82 oprofile_cpu_buffer_inc_smpl_lost();
83 }
84}
85
86/* This function copies the per SPU buffers to the
87 * OProfile kernel buffer.
88 */
89void sync_spu_buff(void)
90{
91 int spu;
92 unsigned long flags;
93 int curr_head;
94
95 for (spu = 0; spu < num_spu_nodes; spu++) {
96 /* In case there was an issue and the buffer didn't
97 * get created skip it.
98 */
99 if (spu_buff[spu].buff == NULL)
100 continue;
101
102 /* Hold the lock to make sure the head/tail
103 * doesn't change while spu_buff_add() is
104 * deciding if the buffer is full or not.
105 * Being a little paranoid.
106 */
107 spin_lock_irqsave(&buffer_lock, flags);
108 curr_head = spu_buff[spu].head;
109 spin_unlock_irqrestore(&buffer_lock, flags);
110
111 /* Transfer the current contents to the kernel buffer.
112 * data can still be added to the head of the buffer.
113 */
114 oprofile_put_buff(spu_buff[spu].buff,
115 spu_buff[spu].tail,
116 curr_head, max_spu_buff);
117
118 spin_lock_irqsave(&buffer_lock, flags);
119 spu_buff[spu].tail = curr_head;
120 spin_unlock_irqrestore(&buffer_lock, flags);
121 }
122
123}
124
125static void wq_sync_spu_buff(struct work_struct *work)
126{
127 /* move data from spu buffers to kernel buffer */
128 sync_spu_buff();
129
130 /* only reschedule if profiling is not done */
131 if (spu_prof_running)
132 schedule_delayed_work(&spu_work, DEFAULT_TIMER_EXPIRE);
133}
39 134
40/* Container for caching information about an active SPU task. */ 135/* Container for caching information about an active SPU task. */
41struct cached_info { 136struct cached_info {
@@ -305,14 +400,21 @@ static int process_context_switch(struct spu *spu, unsigned long objectId)
305 400
306 /* Record context info in event buffer */ 401 /* Record context info in event buffer */
307 spin_lock_irqsave(&buffer_lock, flags); 402 spin_lock_irqsave(&buffer_lock, flags);
308 add_event_entry(ESCAPE_CODE); 403 spu_buff_add(ESCAPE_CODE, spu->number);
309 add_event_entry(SPU_CTX_SWITCH_CODE); 404 spu_buff_add(SPU_CTX_SWITCH_CODE, spu->number);
310 add_event_entry(spu->number); 405 spu_buff_add(spu->number, spu->number);
311 add_event_entry(spu->pid); 406 spu_buff_add(spu->pid, spu->number);
312 add_event_entry(spu->tgid); 407 spu_buff_add(spu->tgid, spu->number);
313 add_event_entry(app_dcookie); 408 spu_buff_add(app_dcookie, spu->number);
314 add_event_entry(spu_cookie); 409 spu_buff_add(spu_cookie, spu->number);
315 add_event_entry(offset); 410 spu_buff_add(offset, spu->number);
411
412 /* Set flag to indicate SPU PC data can now be written out. If
413 * the SPU program counter data is seen before an SPU context
414 * record is seen, the postprocessing will fail.
415 */
416 spu_buff[spu->number].ctx_sw_seen = 1;
417
316 spin_unlock_irqrestore(&buffer_lock, flags); 418 spin_unlock_irqrestore(&buffer_lock, flags);
 317 smp_wmb(); /* ensure spu event buffer updates are written */ 419
318 /* don't want entries intermingled... */ 420 /* don't want entries intermingled... */
@@ -360,6 +462,47 @@ static int number_of_online_nodes(void)
360 return nodes; 462 return nodes;
361} 463}
362 464
465static int oprofile_spu_buff_create(void)
466{
467 int spu;
468
469 max_spu_buff = oprofile_get_cpu_buffer_size();
470
471 for (spu = 0; spu < num_spu_nodes; spu++) {
472 /* create circular buffers to store the data in.
473 * use locks to manage accessing the buffers
474 */
475 spu_buff[spu].head = 0;
476 spu_buff[spu].tail = 0;
477
478 /*
479 * Create a buffer for each SPU. Can't reliably
480 * create a single buffer for all spus due to not
481 * enough contiguous kernel memory.
482 */
483
484 spu_buff[spu].buff = kzalloc((max_spu_buff
485 * sizeof(unsigned long)),
486 GFP_KERNEL);
487
488 if (!spu_buff[spu].buff) {
489 printk(KERN_ERR "SPU_PROF: "
490 "%s, line %d: oprofile_spu_buff_create "
491 "failed to allocate spu buffer %d.\n",
492 __func__, __LINE__, spu);
493
494 /* release the spu buffers that have been allocated */
495 while (spu >= 0) {
496 kfree(spu_buff[spu].buff);
497 spu_buff[spu].buff = 0;
498 spu--;
499 }
500 return -ENOMEM;
501 }
502 }
503 return 0;
504}
505
363/* The main purpose of this function is to synchronize 506/* The main purpose of this function is to synchronize
364 * OProfile with SPUFS by registering to be notified of 507 * OProfile with SPUFS by registering to be notified of
365 * SPU task switches. 508 * SPU task switches.
@@ -372,20 +515,35 @@ static int number_of_online_nodes(void)
372 */ 515 */
373int spu_sync_start(void) 516int spu_sync_start(void)
374{ 517{
375 int k; 518 int spu;
376 int ret = SKIP_GENERIC_SYNC; 519 int ret = SKIP_GENERIC_SYNC;
377 int register_ret; 520 int register_ret;
378 unsigned long flags = 0; 521 unsigned long flags = 0;
379 522
380 spu_prof_num_nodes = number_of_online_nodes(); 523 spu_prof_num_nodes = number_of_online_nodes();
381 num_spu_nodes = spu_prof_num_nodes * 8; 524 num_spu_nodes = spu_prof_num_nodes * 8;
525 INIT_DELAYED_WORK(&spu_work, wq_sync_spu_buff);
526
527 /* create buffer for storing the SPU data to put in
528 * the kernel buffer.
529 */
530 ret = oprofile_spu_buff_create();
531 if (ret)
532 goto out;
382 533
383 spin_lock_irqsave(&buffer_lock, flags); 534 spin_lock_irqsave(&buffer_lock, flags);
384 add_event_entry(ESCAPE_CODE); 535 for (spu = 0; spu < num_spu_nodes; spu++) {
385 add_event_entry(SPU_PROFILING_CODE); 536 spu_buff_add(ESCAPE_CODE, spu);
386 add_event_entry(num_spu_nodes); 537 spu_buff_add(SPU_PROFILING_CODE, spu);
538 spu_buff_add(num_spu_nodes, spu);
539 }
387 spin_unlock_irqrestore(&buffer_lock, flags); 540 spin_unlock_irqrestore(&buffer_lock, flags);
388 541
542 for (spu = 0; spu < num_spu_nodes; spu++) {
543 spu_buff[spu].ctx_sw_seen = 0;
544 spu_buff[spu].last_guard_val = 0;
545 }
546
389 /* Register for SPU events */ 547 /* Register for SPU events */
390 register_ret = spu_switch_event_register(&spu_active); 548 register_ret = spu_switch_event_register(&spu_active);
391 if (register_ret) { 549 if (register_ret) {
@@ -393,8 +551,6 @@ int spu_sync_start(void)
393 goto out; 551 goto out;
394 } 552 }
395 553
396 for (k = 0; k < (MAX_NUMNODES * 8); k++)
397 last_guard_val[k] = 0;
398 pr_debug("spu_sync_start -- running.\n"); 554 pr_debug("spu_sync_start -- running.\n");
399out: 555out:
400 return ret; 556 return ret;
@@ -446,13 +602,20 @@ void spu_sync_buffer(int spu_num, unsigned int *samples,
446 * use. We need to discard samples taken during the time 602 * use. We need to discard samples taken during the time
447 * period which an overlay occurs (i.e., guard value changes). 603 * period which an overlay occurs (i.e., guard value changes).
448 */ 604 */
449 if (grd_val && grd_val != last_guard_val[spu_num]) { 605 if (grd_val && grd_val != spu_buff[spu_num].last_guard_val) {
450 last_guard_val[spu_num] = grd_val; 606 spu_buff[spu_num].last_guard_val = grd_val;
451 /* Drop the rest of the samples. */ 607 /* Drop the rest of the samples. */
452 break; 608 break;
453 } 609 }
454 610
455 add_event_entry(file_offset | spu_num_shifted); 611 /* We must ensure that the SPU context switch has been written
612 * out before samples for the SPU. Otherwise, the SPU context
613 * information is not available and the postprocessing of the
614 * SPU PC will fail with no available anonymous map information.
615 */
616 if (spu_buff[spu_num].ctx_sw_seen)
617 spu_buff_add((file_offset | spu_num_shifted),
618 spu_num);
456 } 619 }
457 spin_unlock(&buffer_lock); 620 spin_unlock(&buffer_lock);
458out: 621out:
@@ -463,20 +626,41 @@ out:
463int spu_sync_stop(void) 626int spu_sync_stop(void)
464{ 627{
465 unsigned long flags = 0; 628 unsigned long flags = 0;
466 int ret = spu_switch_event_unregister(&spu_active); 629 int ret;
467 if (ret) { 630 int k;
631
632 ret = spu_switch_event_unregister(&spu_active);
633
634 if (ret)
468 printk(KERN_ERR "SPU_PROF: " 635 printk(KERN_ERR "SPU_PROF: "
469 "%s, line %d: spu_switch_event_unregister returned %d\n", 636 "%s, line %d: spu_switch_event_unregister " \
470 __func__, __LINE__, ret); 637 "returned %d\n",
471 goto out; 638 __func__, __LINE__, ret);
472 } 639
640 /* flush any remaining data in the per SPU buffers */
641 sync_spu_buff();
473 642
474 spin_lock_irqsave(&cache_lock, flags); 643 spin_lock_irqsave(&cache_lock, flags);
475 ret = release_cached_info(RELEASE_ALL); 644 ret = release_cached_info(RELEASE_ALL);
476 spin_unlock_irqrestore(&cache_lock, flags); 645 spin_unlock_irqrestore(&cache_lock, flags);
477out: 646
 647 /* remove scheduled work queue item rather than waiting
648 * for every queued entry to execute. Then flush pending
649 * system wide buffer to event buffer.
650 */
651 cancel_delayed_work(&spu_work);
652
653 for (k = 0; k < num_spu_nodes; k++) {
654 spu_buff[k].ctx_sw_seen = 0;
655
656 /*
 657 * spu_buff[k].buff will be null if there was a problem
658 * allocating the buffer. Only delete if it exists.
659 */
660 kfree(spu_buff[k].buff);
661 spu_buff[k].buff = 0;
662 }
478 pr_debug("spu_sync_stop -- done.\n"); 663 pr_debug("spu_sync_stop -- done.\n");
479 return ret; 664 return ret;
480} 665}
481 666
482
diff --git a/arch/powerpc/oprofile/op_model_cell.c b/arch/powerpc/oprofile/op_model_cell.c
index 5ff4de3eb3be..35141a8bc3d9 100644
--- a/arch/powerpc/oprofile/op_model_cell.c
+++ b/arch/powerpc/oprofile/op_model_cell.c
@@ -404,7 +404,7 @@ set_count_mode(u32 kernel, u32 user)
404 } 404 }
405} 405}
406 406
407static inline void enable_ctr(u32 cpu, u32 ctr, u32 * pm07_cntrl) 407static inline void enable_ctr(u32 cpu, u32 ctr, u32 *pm07_cntrl)
408{ 408{
409 409
410 pm07_cntrl[ctr] |= CBE_PM_CTR_ENABLE; 410 pm07_cntrl[ctr] |= CBE_PM_CTR_ENABLE;
diff --git a/arch/powerpc/platforms/40x/Kconfig b/arch/powerpc/platforms/40x/Kconfig
index a9260e21451e..65730275e012 100644
--- a/arch/powerpc/platforms/40x/Kconfig
+++ b/arch/powerpc/platforms/40x/Kconfig
@@ -14,6 +14,15 @@
14# help 14# help
15# This option enables support for the CPCI405 board. 15# This option enables support for the CPCI405 board.
16 16
17config ACADIA
18 bool "Acadia"
19 depends on 40x
20 default n
21 select PPC40x_SIMPLE
22 select 405EZ
23 help
24 This option enables support for the AMCC 405EZ Acadia evaluation board.
25
17config EP405 26config EP405
18 bool "EP405/EP405PC" 27 bool "EP405/EP405PC"
19 depends on 40x 28 depends on 40x
@@ -23,6 +32,14 @@ config EP405
23 help 32 help
24 This option enables support for the EP405/EP405PC boards. 33 This option enables support for the EP405/EP405PC boards.
25 34
35config HCU4
36 bool "Hcu4"
37 depends on 40x
38 default y
39 select 405GPR
40 help
 41 This option enables support for the Netstal Maschinen HCU4 board.
42
26config KILAUEA 43config KILAUEA
27 bool "Kilauea" 44 bool "Kilauea"
28 depends on 40x 45 depends on 40x
@@ -93,6 +110,13 @@ config XILINX_VIRTEX_GENERIC_BOARD
93 Most Virtex designs should use this unless it needs to do some 110 Most Virtex designs should use this unless it needs to do some
94 special configuration at board probe time. 111 special configuration at board probe time.
95 112
113config PPC40x_SIMPLE
114 bool "Simple PowerPC 40x board support"
115 depends on 40x
116 default n
117 help
118 This option enables the simple PowerPC 40x platform support.
119
96# 40x specific CPU modules, selected based on the board above. 120# 40x specific CPU modules, selected based on the board above.
97config NP405H 121config NP405H
98 bool 122 bool
@@ -118,6 +142,12 @@ config 405EX
118 select IBM_NEW_EMAC_EMAC4 142 select IBM_NEW_EMAC_EMAC4
119 select IBM_NEW_EMAC_RGMII 143 select IBM_NEW_EMAC_RGMII
120 144
145config 405EZ
146 bool
147 select IBM_NEW_EMAC_NO_FLOW_CTRL
148 select IBM_NEW_EMAC_MAL_CLR_ICINTSTAT
149 select IBM_NEW_EMAC_MAL_COMMON_ERR
150
121config 405GPR 151config 405GPR
122 bool 152 bool
123 153
@@ -139,6 +169,14 @@ config STB03xxx
139 select IBM405_ERR77 169 select IBM405_ERR77
140 select IBM405_ERR51 170 select IBM405_ERR51
141 171
172config PPC4xx_GPIO
173 bool "PPC4xx GPIO support"
174 depends on 40x
175 select ARCH_REQUIRE_GPIOLIB
176 select GENERIC_GPIO
177 help
178 Enable gpiolib support for ppc40x based boards
179
142# 40x errata/workaround config symbols, selected by the CPU models above 180# 40x errata/workaround config symbols, selected by the CPU models above
143 181
144# All 405-based cores up until the 405GPR and 405EP have this errata. 182# All 405-based cores up until the 405GPR and 405EP have this errata.
diff --git a/arch/powerpc/platforms/40x/Makefile b/arch/powerpc/platforms/40x/Makefile
index 5533a5c8ce4e..9bab76a652a6 100644
--- a/arch/powerpc/platforms/40x/Makefile
+++ b/arch/powerpc/platforms/40x/Makefile
@@ -1,5 +1,7 @@
1obj-$(CONFIG_KILAUEA) += kilauea.o 1obj-$(CONFIG_KILAUEA) += kilauea.o
2obj-$(CONFIG_HCU4) += hcu4.o
2obj-$(CONFIG_MAKALU) += makalu.o 3obj-$(CONFIG_MAKALU) += makalu.o
3obj-$(CONFIG_WALNUT) += walnut.o 4obj-$(CONFIG_WALNUT) += walnut.o
4obj-$(CONFIG_XILINX_VIRTEX_GENERIC_BOARD) += virtex.o 5obj-$(CONFIG_XILINX_VIRTEX_GENERIC_BOARD) += virtex.o
5obj-$(CONFIG_EP405) += ep405.o 6obj-$(CONFIG_EP405) += ep405.o
7obj-$(CONFIG_PPC40x_SIMPLE) += ppc40x_simple.o
diff --git a/arch/powerpc/platforms/40x/hcu4.c b/arch/powerpc/platforms/40x/hcu4.c
new file mode 100644
index 000000000000..60b2afecab75
--- /dev/null
+++ b/arch/powerpc/platforms/40x/hcu4.c
@@ -0,0 +1,61 @@
1/*
2 * Architecture- / platform-specific boot-time initialization code for
3 * IBM PowerPC 4xx based boards. Adapted from original
4 * code by Gary Thomas, Cort Dougan <cort@fsmlabs.com>, and Dan Malek
5 * <dan@net4x.com>.
6 *
7 * Copyright(c) 1999-2000 Grant Erickson <grant@lcse.umn.edu>
8 *
9 * Rewritten and ported to the merged powerpc tree:
10 * Copyright 2007 IBM Corporation
11 * Josh Boyer <jwboyer@linux.vnet.ibm.com>
12 *
13 * 2002 (c) MontaVista, Software, Inc. This file is licensed under
14 * the terms of the GNU General Public License version 2. This program
15 * is licensed "as is" without any warranty of any kind, whether express
16 * or implied.
17 */
18
19#include <linux/init.h>
20#include <linux/of_platform.h>
21
22#include <asm/machdep.h>
23#include <asm/prom.h>
24#include <asm/udbg.h>
25#include <asm/time.h>
26#include <asm/uic.h>
27#include <asm/ppc4xx.h>
28
29static __initdata struct of_device_id hcu4_of_bus[] = {
30 { .compatible = "ibm,plb3", },
31 { .compatible = "ibm,opb", },
32 { .compatible = "ibm,ebc", },
33 {},
34};
35
36static int __init hcu4_device_probe(void)
37{
38 of_platform_bus_probe(NULL, hcu4_of_bus, NULL);
39 return 0;
40}
41machine_device_initcall(hcu4, hcu4_device_probe);
42
43static int __init hcu4_probe(void)
44{
45 unsigned long root = of_get_flat_dt_root();
46
47 if (!of_flat_dt_is_compatible(root, "netstal,hcu4"))
48 return 0;
49
50 return 1;
51}
52
53define_machine(hcu4) {
54 .name = "HCU4",
55 .probe = hcu4_probe,
56 .progress = udbg_progress,
57 .init_IRQ = uic_init_tree,
58 .get_irq = uic_get_irq,
59 .restart = ppc4xx_reset_system,
60 .calibrate_decr = generic_calibrate_decr,
61};
diff --git a/arch/powerpc/platforms/40x/ppc40x_simple.c b/arch/powerpc/platforms/40x/ppc40x_simple.c
new file mode 100644
index 000000000000..4498a86b46c3
--- /dev/null
+++ b/arch/powerpc/platforms/40x/ppc40x_simple.c
@@ -0,0 +1,80 @@
1/*
2 * Generic PowerPC 40x platform support
3 *
4 * Copyright 2008 IBM Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; version 2 of the License.
9 *
 10 * This implements simple platform support for PowerPC 40x chips. This is
 11 * mostly used for eval boards or other simple and "generic" 40x boards. If
12 * your board has custom functions or hardware, then you will likely want to
13 * implement your own board.c file to accommodate it.
14 */
15
16#include <asm/machdep.h>
17#include <asm/pci-bridge.h>
18#include <asm/ppc4xx.h>
19#include <asm/prom.h>
20#include <asm/time.h>
21#include <asm/udbg.h>
22#include <asm/uic.h>
23
24#include <linux/init.h>
25#include <linux/of_platform.h>
26
27static __initdata struct of_device_id ppc40x_of_bus[] = {
28 { .compatible = "ibm,plb3", },
29 { .compatible = "ibm,plb4", },
30 { .compatible = "ibm,opb", },
31 { .compatible = "ibm,ebc", },
32 { .compatible = "simple-bus", },
33 {},
34};
35
36static int __init ppc40x_device_probe(void)
37{
38 of_platform_bus_probe(NULL, ppc40x_of_bus, NULL);
39
40 return 0;
41}
42machine_device_initcall(ppc40x_simple, ppc40x_device_probe);
43
44/* This is the list of boards that can be supported by this simple
45 * platform code. This does _not_ mean the boards are compatible,
46 * as they most certainly are not from a device tree perspective.
47 * However, their differences are handled by the device tree and the
48 * drivers and therefore they don't need custom board support files.
49 *
50 * Again, if your board needs to do things differently then create a
51 * board.c file for it rather than adding it to this list.
52 */
53static char *board[] __initdata = {
54 "amcc,acadia"
55};
56
57static int __init ppc40x_probe(void)
58{
59 unsigned long root = of_get_flat_dt_root();
60 int i = 0;
61
62 for (i = 0; i < ARRAY_SIZE(board); i++) {
63 if (of_flat_dt_is_compatible(root, board[i])) {
64 ppc_pci_flags = PPC_PCI_REASSIGN_ALL_RSRC;
65 return 1;
66 }
67 }
68
69 return 0;
70}
71
72define_machine(ppc40x_simple) {
73 .name = "PowerPC 40x Platform",
74 .probe = ppc40x_probe,
75 .progress = udbg_progress,
76 .init_IRQ = uic_init_tree,
77 .get_irq = uic_get_irq,
78 .restart = ppc4xx_reset_system,
79 .calibrate_decr = generic_calibrate_decr,
80};
diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig
index 79c1154f88d4..3496bc05058e 100644
--- a/arch/powerpc/platforms/44x/Kconfig
+++ b/arch/powerpc/platforms/44x/Kconfig
@@ -167,6 +167,14 @@ config PPC44x_SIMPLE
167 help 167 help
168 This option enables the simple PowerPC 44x platform support. 168 This option enables the simple PowerPC 44x platform support.
169 169
170config PPC4xx_GPIO
171 bool "PPC4xx GPIO support"
172 depends on 44x
173 select ARCH_REQUIRE_GPIOLIB
174 select GENERIC_GPIO
175 help
176 Enable gpiolib support for ppc440 based boards
177
170# 44x specific CPU modules, selected based on the board above. 178# 44x specific CPU modules, selected based on the board above.
171config 440EP 179config 440EP
172 bool 180 bool
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_common.c b/arch/powerpc/platforms/52xx/mpc52xx_common.c
index 044b4e6e8743..ae7c34f37e1c 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_common.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_common.c
@@ -99,11 +99,14 @@ mpc5200_setup_xlb_arbiter(void)
99 out_be32(&xlb->master_pri_enable, 0xff); 99 out_be32(&xlb->master_pri_enable, 0xff);
100 out_be32(&xlb->master_priority, 0x11111111); 100 out_be32(&xlb->master_priority, 0x11111111);
101 101
102 /* Disable XLB pipelining 102 /*
103 * Disable XLB pipelining
103 * (cfr errate 292. We could do this only just before ATA PIO 104 * (cfr errate 292. We could do this only just before ATA PIO
104 * transaction and re-enable it afterwards ...) 105 * transaction and re-enable it afterwards ...)
106 * Not needed on MPC5200B.
105 */ 107 */
106 out_be32(&xlb->config, in_be32(&xlb->config) | MPC52xx_XLB_CFG_PLDIS); 108 if ((mfspr(SPRN_SVR) & MPC5200_SVR_MASK) == MPC5200_SVR)
109 out_be32(&xlb->config, in_be32(&xlb->config) | MPC52xx_XLB_CFG_PLDIS);
107 110
108 iounmap(xlb); 111 iounmap(xlb);
109} 112}
diff --git a/arch/powerpc/platforms/85xx/ksi8560.c b/arch/powerpc/platforms/85xx/ksi8560.c
index 8a3b117b6ce2..81cee7bbf2d2 100644
--- a/arch/powerpc/platforms/85xx/ksi8560.c
+++ b/arch/powerpc/platforms/85xx/ksi8560.c
@@ -193,7 +193,6 @@ static void __init ksi8560_setup_arch(void)
193static void ksi8560_show_cpuinfo(struct seq_file *m) 193static void ksi8560_show_cpuinfo(struct seq_file *m)
194{ 194{
195 uint pvid, svid, phid1; 195 uint pvid, svid, phid1;
196 uint memsize = total_memory;
197 196
198 pvid = mfspr(SPRN_PVR); 197 pvid = mfspr(SPRN_PVR);
199 svid = mfspr(SPRN_SVR); 198 svid = mfspr(SPRN_SVR);
@@ -215,9 +214,6 @@ static void ksi8560_show_cpuinfo(struct seq_file *m)
215 /* Display cpu Pll setting */ 214 /* Display cpu Pll setting */
216 phid1 = mfspr(SPRN_HID1); 215 phid1 = mfspr(SPRN_HID1);
217 seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f)); 216 seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
218
219 /* Display the amount of memory */
220 seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024));
221} 217}
222 218
223static struct of_device_id __initdata of_bus_ids[] = { 219static struct of_device_id __initdata of_bus_ids[] = {
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ads.c b/arch/powerpc/platforms/85xx/mpc85xx_ads.c
index 0293e3d3580f..21f009023e26 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_ads.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_ads.c
@@ -207,7 +207,6 @@ static void __init mpc85xx_ads_setup_arch(void)
207static void mpc85xx_ads_show_cpuinfo(struct seq_file *m) 207static void mpc85xx_ads_show_cpuinfo(struct seq_file *m)
208{ 208{
209 uint pvid, svid, phid1; 209 uint pvid, svid, phid1;
210 uint memsize = total_memory;
211 210
212 pvid = mfspr(SPRN_PVR); 211 pvid = mfspr(SPRN_PVR);
213 svid = mfspr(SPRN_SVR); 212 svid = mfspr(SPRN_SVR);
@@ -219,9 +218,6 @@ static void mpc85xx_ads_show_cpuinfo(struct seq_file *m)
219 /* Display cpu Pll setting */ 218 /* Display cpu Pll setting */
220 phid1 = mfspr(SPRN_HID1); 219 phid1 = mfspr(SPRN_HID1);
221 seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f)); 220 seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
222
223 /* Display the amount of memory */
224 seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024));
225} 221}
226 222
227static struct of_device_id __initdata of_bus_ids[] = { 223static struct of_device_id __initdata of_bus_ids[] = {
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_cds.c b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
index 50d7ea8f922b..aeb6a5bc5522 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_cds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
@@ -307,7 +307,6 @@ static void __init mpc85xx_cds_setup_arch(void)
307static void mpc85xx_cds_show_cpuinfo(struct seq_file *m) 307static void mpc85xx_cds_show_cpuinfo(struct seq_file *m)
308{ 308{
309 uint pvid, svid, phid1; 309 uint pvid, svid, phid1;
310 uint memsize = total_memory;
311 310
312 pvid = mfspr(SPRN_PVR); 311 pvid = mfspr(SPRN_PVR);
313 svid = mfspr(SPRN_SVR); 312 svid = mfspr(SPRN_SVR);
@@ -320,9 +319,6 @@ static void mpc85xx_cds_show_cpuinfo(struct seq_file *m)
320 /* Display cpu Pll setting */ 319 /* Display cpu Pll setting */
321 phid1 = mfspr(SPRN_HID1); 320 phid1 = mfspr(SPRN_HID1);
322 seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f)); 321 seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
323
324 /* Display the amount of memory */
325 seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024));
326} 322}
327 323
328 324
diff --git a/arch/powerpc/platforms/85xx/sbc8548.c b/arch/powerpc/platforms/85xx/sbc8548.c
index b9246ea0928a..7ec77ce12dad 100644
--- a/arch/powerpc/platforms/85xx/sbc8548.c
+++ b/arch/powerpc/platforms/85xx/sbc8548.c
@@ -136,7 +136,6 @@ static void __init sbc8548_setup_arch(void)
136static void sbc8548_show_cpuinfo(struct seq_file *m) 136static void sbc8548_show_cpuinfo(struct seq_file *m)
137{ 137{
138 uint pvid, svid, phid1; 138 uint pvid, svid, phid1;
139 uint memsize = total_memory;
140 139
141 pvid = mfspr(SPRN_PVR); 140 pvid = mfspr(SPRN_PVR);
142 svid = mfspr(SPRN_SVR); 141 svid = mfspr(SPRN_SVR);
@@ -149,9 +148,6 @@ static void sbc8548_show_cpuinfo(struct seq_file *m)
149 /* Display cpu Pll setting */ 148 /* Display cpu Pll setting */
150 phid1 = mfspr(SPRN_HID1); 149 phid1 = mfspr(SPRN_HID1);
151 seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f)); 150 seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
152
153 /* Display the amount of memory */
154 seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024));
155} 151}
156 152
157static struct of_device_id __initdata of_bus_ids[] = { 153static struct of_device_id __initdata of_bus_ids[] = {
diff --git a/arch/powerpc/platforms/85xx/sbc8560.c b/arch/powerpc/platforms/85xx/sbc8560.c
index 0c9a856f66b6..472f254a19d2 100644
--- a/arch/powerpc/platforms/85xx/sbc8560.c
+++ b/arch/powerpc/platforms/85xx/sbc8560.c
@@ -194,7 +194,6 @@ static void __init sbc8560_setup_arch(void)
194static void sbc8560_show_cpuinfo(struct seq_file *m) 194static void sbc8560_show_cpuinfo(struct seq_file *m)
195{ 195{
196 uint pvid, svid, phid1; 196 uint pvid, svid, phid1;
197 uint memsize = total_memory;
198 197
199 pvid = mfspr(SPRN_PVR); 198 pvid = mfspr(SPRN_PVR);
200 svid = mfspr(SPRN_SVR); 199 svid = mfspr(SPRN_SVR);
@@ -206,9 +205,6 @@ static void sbc8560_show_cpuinfo(struct seq_file *m)
206 /* Display cpu Pll setting */ 205 /* Display cpu Pll setting */
207 phid1 = mfspr(SPRN_HID1); 206 phid1 = mfspr(SPRN_HID1);
208 seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f)); 207 seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
209
210 /* Display the amount of memory */
211 seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024));
212} 208}
213 209
214static struct of_device_id __initdata of_bus_ids[] = { 210static struct of_device_id __initdata of_bus_ids[] = {
diff --git a/arch/powerpc/platforms/85xx/stx_gp3.c b/arch/powerpc/platforms/85xx/stx_gp3.c
index 18499d7c9d9e..0cca8f5cb272 100644
--- a/arch/powerpc/platforms/85xx/stx_gp3.c
+++ b/arch/powerpc/platforms/85xx/stx_gp3.c
@@ -130,7 +130,6 @@ static void __init stx_gp3_setup_arch(void)
130static void stx_gp3_show_cpuinfo(struct seq_file *m) 130static void stx_gp3_show_cpuinfo(struct seq_file *m)
131{ 131{
132 uint pvid, svid, phid1; 132 uint pvid, svid, phid1;
133 uint memsize = total_memory;
134 133
135 pvid = mfspr(SPRN_PVR); 134 pvid = mfspr(SPRN_PVR);
136 svid = mfspr(SPRN_SVR); 135 svid = mfspr(SPRN_SVR);
@@ -142,9 +141,6 @@ static void stx_gp3_show_cpuinfo(struct seq_file *m)
142 /* Display cpu Pll setting */ 141 /* Display cpu Pll setting */
143 phid1 = mfspr(SPRN_HID1); 142 phid1 = mfspr(SPRN_HID1);
144 seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f)); 143 seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
145
146 /* Display the amount of memory */
147 seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024));
148} 144}
149 145
150static struct of_device_id __initdata of_bus_ids[] = { 146static struct of_device_id __initdata of_bus_ids[] = {
diff --git a/arch/powerpc/platforms/85xx/tqm85xx.c b/arch/powerpc/platforms/85xx/tqm85xx.c
index d850880d6964..2933a8e827d9 100644
--- a/arch/powerpc/platforms/85xx/tqm85xx.c
+++ b/arch/powerpc/platforms/85xx/tqm85xx.c
@@ -138,7 +138,6 @@ static void __init tqm85xx_setup_arch(void)
138static void tqm85xx_show_cpuinfo(struct seq_file *m) 138static void tqm85xx_show_cpuinfo(struct seq_file *m)
139{ 139{
140 uint pvid, svid, phid1; 140 uint pvid, svid, phid1;
141 uint memsize = total_memory;
142 141
143 pvid = mfspr(SPRN_PVR); 142 pvid = mfspr(SPRN_PVR);
144 svid = mfspr(SPRN_SVR); 143 svid = mfspr(SPRN_SVR);
@@ -150,9 +149,6 @@ static void tqm85xx_show_cpuinfo(struct seq_file *m)
150 /* Display cpu Pll setting */ 149 /* Display cpu Pll setting */
151 phid1 = mfspr(SPRN_HID1); 150 phid1 = mfspr(SPRN_HID1);
152 seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f)); 151 seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
153
154 /* Display the amount of memory */
155 seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024));
156} 152}
157 153
158static struct of_device_id __initdata of_bus_ids[] = { 154static struct of_device_id __initdata of_bus_ids[] = {
diff --git a/arch/powerpc/platforms/86xx/gef_sbc610.c b/arch/powerpc/platforms/86xx/gef_sbc610.c
index 821c45fac18b..fb371f5ce132 100644
--- a/arch/powerpc/platforms/86xx/gef_sbc610.c
+++ b/arch/powerpc/platforms/86xx/gef_sbc610.c
@@ -127,7 +127,6 @@ static unsigned int gef_sbc610_get_fpga_rev(void)
127 127
128static void gef_sbc610_show_cpuinfo(struct seq_file *m) 128static void gef_sbc610_show_cpuinfo(struct seq_file *m)
129{ 129{
130 uint memsize = total_memory;
131 uint svid = mfspr(SPRN_SVR); 130 uint svid = mfspr(SPRN_SVR);
132 131
133 seq_printf(m, "Vendor\t\t: GE Fanuc Intelligent Platforms\n"); 132 seq_printf(m, "Vendor\t\t: GE Fanuc Intelligent Platforms\n");
@@ -137,7 +136,6 @@ static void gef_sbc610_show_cpuinfo(struct seq_file *m)
137 seq_printf(m, "FPGA Revision\t: %u\n", gef_sbc610_get_fpga_rev()); 136 seq_printf(m, "FPGA Revision\t: %u\n", gef_sbc610_get_fpga_rev());
138 137
139 seq_printf(m, "SVR\t\t: 0x%x\n", svid); 138 seq_printf(m, "SVR\t\t: 0x%x\n", svid);
140 seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024));
141} 139}
142 140
143static void __init gef_sbc610_nec_fixup(struct pci_dev *pdev) 141static void __init gef_sbc610_nec_fixup(struct pci_dev *pdev)
diff --git a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
index 2672829a71dc..27e0e682d8e1 100644
--- a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
+++ b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
@@ -101,13 +101,11 @@ mpc86xx_hpcn_setup_arch(void)
101static void 101static void
102mpc86xx_hpcn_show_cpuinfo(struct seq_file *m) 102mpc86xx_hpcn_show_cpuinfo(struct seq_file *m)
103{ 103{
104 uint memsize = total_memory;
105 uint svid = mfspr(SPRN_SVR); 104 uint svid = mfspr(SPRN_SVR);
106 105
107 seq_printf(m, "Vendor\t\t: Freescale Semiconductor\n"); 106 seq_printf(m, "Vendor\t\t: Freescale Semiconductor\n");
108 107
109 seq_printf(m, "SVR\t\t: 0x%x\n", svid); 108 seq_printf(m, "SVR\t\t: 0x%x\n", svid);
110 seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024));
111} 109}
112 110
113 111
diff --git a/arch/powerpc/platforms/86xx/sbc8641d.c b/arch/powerpc/platforms/86xx/sbc8641d.c
index da677a74e2d1..5fd7ed40986f 100644
--- a/arch/powerpc/platforms/86xx/sbc8641d.c
+++ b/arch/powerpc/platforms/86xx/sbc8641d.c
@@ -63,13 +63,11 @@ sbc8641_setup_arch(void)
63static void 63static void
64sbc8641_show_cpuinfo(struct seq_file *m) 64sbc8641_show_cpuinfo(struct seq_file *m)
65{ 65{
66 uint memsize = total_memory;
67 uint svid = mfspr(SPRN_SVR); 66 uint svid = mfspr(SPRN_SVR);
68 67
69 seq_printf(m, "Vendor\t\t: Wind River Systems\n"); 68 seq_printf(m, "Vendor\t\t: Wind River Systems\n");
70 69
71 seq_printf(m, "SVR\t\t: 0x%x\n", svid); 70 seq_printf(m, "SVR\t\t: 0x%x\n", svid);
72 seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024));
73} 71}
74 72
75 73
diff --git a/arch/powerpc/platforms/cell/ras.c b/arch/powerpc/platforms/cell/ras.c
index 2a14b052abcd..665af1c4195b 100644
--- a/arch/powerpc/platforms/cell/ras.c
+++ b/arch/powerpc/platforms/cell/ras.c
@@ -21,6 +21,7 @@
21#include <asm/machdep.h> 21#include <asm/machdep.h>
22#include <asm/rtas.h> 22#include <asm/rtas.h>
23#include <asm/cell-regs.h> 23#include <asm/cell-regs.h>
24#include <asm/kdump.h>
24 25
25#include "ras.h" 26#include "ras.h"
26 27
@@ -111,9 +112,8 @@ static int __init cbe_ptcal_enable_on_node(int nid, int order)
111 int ret = -ENOMEM; 112 int ret = -ENOMEM;
112 unsigned long addr; 113 unsigned long addr;
113 114
114#ifdef CONFIG_CRASH_DUMP 115 if (__kdump_flag)
115 rtas_call(ptcal_stop_tok, 1, 1, NULL, nid); 116 rtas_call(ptcal_stop_tok, 1, 1, NULL, nid);
116#endif
117 117
118 area = kmalloc(sizeof(*area), GFP_KERNEL); 118 area = kmalloc(sizeof(*area), GFP_KERNEL);
119 if (!area) 119 if (!area)
diff --git a/arch/powerpc/platforms/cell/smp.c b/arch/powerpc/platforms/cell/smp.c
index efb3964457b1..c0d86e1f56ea 100644
--- a/arch/powerpc/platforms/cell/smp.c
+++ b/arch/powerpc/platforms/cell/smp.c
@@ -54,8 +54,8 @@
54#endif 54#endif
55 55
56/* 56/*
57 * The primary thread of each non-boot processor is recorded here before 57 * The Primary thread of each non-boot processor was started from the OF client
58 * smp init. 58 * interface by prom_hold_cpus and is spinning on secondary_hold_spinloop.
59 */ 59 */
60static cpumask_t of_spin_map; 60static cpumask_t of_spin_map;
61 61
@@ -208,11 +208,7 @@ void __init smp_init_cell(void)
208 /* Mark threads which are still spinning in hold loops. */ 208 /* Mark threads which are still spinning in hold loops. */
209 if (cpu_has_feature(CPU_FTR_SMT)) { 209 if (cpu_has_feature(CPU_FTR_SMT)) {
210 for_each_present_cpu(i) { 210 for_each_present_cpu(i) {
211 if (i % 2 == 0) 211 if (cpu_thread_in_core(i) == 0)
212 /*
213 * Even-numbered logical cpus correspond to
214 * primary threads.
215 */
216 cpu_set(i, of_spin_map); 212 cpu_set(i, of_spin_map);
217 } 213 }
218 } else { 214 } else {
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 010a51f59796..b73c369cc6f1 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -548,6 +548,11 @@ spufs_regs_read(struct file *file, char __user *buffer,
548 int ret; 548 int ret;
549 struct spu_context *ctx = file->private_data; 549 struct spu_context *ctx = file->private_data;
550 550
551 /* pre-check for file position: if we'd return EOF, there's no point
552 * causing a deschedule */
553 if (*pos >= sizeof(ctx->csa.lscsa->gprs))
554 return 0;
555
551 ret = spu_acquire_saved(ctx); 556 ret = spu_acquire_saved(ctx);
552 if (ret) 557 if (ret)
553 return ret; 558 return ret;
@@ -2426,38 +2431,49 @@ static inline int spufs_switch_log_avail(struct spu_context *ctx)
2426static int spufs_switch_log_open(struct inode *inode, struct file *file) 2431static int spufs_switch_log_open(struct inode *inode, struct file *file)
2427{ 2432{
2428 struct spu_context *ctx = SPUFS_I(inode)->i_ctx; 2433 struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
2434 int rc;
2435
2436 rc = spu_acquire(ctx);
2437 if (rc)
2438 return rc;
2429 2439
2430 /*
2431 * We (ab-)use the mapping_lock here because it serves the similar
2432 * purpose for synchronizing open/close elsewhere. Maybe it should
2433 * be renamed eventually.
2434 */
2435 mutex_lock(&ctx->mapping_lock);
2436 if (ctx->switch_log) { 2440 if (ctx->switch_log) {
2437 spin_lock(&ctx->switch_log->lock); 2441 rc = -EBUSY;
2438 ctx->switch_log->head = 0; 2442 goto out;
2439 ctx->switch_log->tail = 0;
2440 spin_unlock(&ctx->switch_log->lock);
2441 } else {
2442 /*
2443 * We allocate the switch log data structures on first open.
2444 * They will never be free because we assume a context will
2445 * be traced until it goes away.
2446 */
2447 ctx->switch_log = kzalloc(sizeof(struct switch_log) +
2448 SWITCH_LOG_BUFSIZE * sizeof(struct switch_log_entry),
2449 GFP_KERNEL);
2450 if (!ctx->switch_log)
2451 goto out;
2452 spin_lock_init(&ctx->switch_log->lock);
2453 init_waitqueue_head(&ctx->switch_log->wait);
2454 } 2443 }
2455 mutex_unlock(&ctx->mapping_lock); 2444
2445 ctx->switch_log = kmalloc(sizeof(struct switch_log) +
2446 SWITCH_LOG_BUFSIZE * sizeof(struct switch_log_entry),
2447 GFP_KERNEL);
2448
2449 if (!ctx->switch_log) {
2450 rc = -ENOMEM;
2451 goto out;
2452 }
2453
2454 ctx->switch_log->head = ctx->switch_log->tail = 0;
2455 init_waitqueue_head(&ctx->switch_log->wait);
2456 rc = 0;
2457
2458out:
2459 spu_release(ctx);
2460 return rc;
2461}
2462
2463static int spufs_switch_log_release(struct inode *inode, struct file *file)
2464{
2465 struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
2466 int rc;
2467
2468 rc = spu_acquire(ctx);
2469 if (rc)
2470 return rc;
2471
2472 kfree(ctx->switch_log);
2473 ctx->switch_log = NULL;
2474 spu_release(ctx);
2456 2475
2457 return 0; 2476 return 0;
2458 out:
2459 mutex_unlock(&ctx->mapping_lock);
2460 return -ENOMEM;
2461} 2477}
2462 2478
2463static int switch_log_sprint(struct spu_context *ctx, char *tbuf, int n) 2479static int switch_log_sprint(struct spu_context *ctx, char *tbuf, int n)
@@ -2485,42 +2501,54 @@ static ssize_t spufs_switch_log_read(struct file *file, char __user *buf,
2485 if (!buf || len < 0) 2501 if (!buf || len < 0)
2486 return -EINVAL; 2502 return -EINVAL;
2487 2503
2504 error = spu_acquire(ctx);
2505 if (error)
2506 return error;
2507
2488 while (cnt < len) { 2508 while (cnt < len) {
2489 char tbuf[128]; 2509 char tbuf[128];
2490 int width; 2510 int width;
2491 2511
2492 if (file->f_flags & O_NONBLOCK) { 2512 if (spufs_switch_log_used(ctx) == 0) {
2493 if (spufs_switch_log_used(ctx) <= 0) 2513 if (cnt > 0) {
2494 return cnt ? cnt : -EAGAIN; 2514 /* If there's data ready to go, we can
2495 } else { 2515 * just return straight away */
2496 /* Wait for data in buffer */ 2516 break;
2497 error = wait_event_interruptible(ctx->switch_log->wait, 2517
2498 spufs_switch_log_used(ctx) > 0); 2518 } else if (file->f_flags & O_NONBLOCK) {
2499 if (error) 2519 error = -EAGAIN;
2500 break; 2520 break;
2501 }
2502 2521
2503 spin_lock(&ctx->switch_log->lock); 2522 } else {
2504 if (ctx->switch_log->head == ctx->switch_log->tail) { 2523 /* spufs_wait will drop the mutex and
2505 /* multiple readers race? */ 2524 * re-acquire, but since we're in read(), the
2506 spin_unlock(&ctx->switch_log->lock); 2525 * file cannot be _released (and so
2507 continue; 2526 * ctx->switch_log is stable).
2527 */
2528 error = spufs_wait(ctx->switch_log->wait,
2529 spufs_switch_log_used(ctx) > 0);
2530
2531 /* On error, spufs_wait returns without the
2532 * state mutex held */
2533 if (error)
2534 return error;
2535
2536 /* We may have had entries read from underneath
2537 * us while we dropped the mutex in spufs_wait,
2538 * so re-check */
2539 if (spufs_switch_log_used(ctx) == 0)
2540 continue;
2541 }
2508 } 2542 }
2509 2543
2510 width = switch_log_sprint(ctx, tbuf, sizeof(tbuf)); 2544 width = switch_log_sprint(ctx, tbuf, sizeof(tbuf));
2511 if (width < len) { 2545 if (width < len)
2512 ctx->switch_log->tail = 2546 ctx->switch_log->tail =
2513 (ctx->switch_log->tail + 1) % 2547 (ctx->switch_log->tail + 1) %
2514 SWITCH_LOG_BUFSIZE; 2548 SWITCH_LOG_BUFSIZE;
2515 } 2549 else
2516 2550 /* If the record is greater than space available return
2517 spin_unlock(&ctx->switch_log->lock); 2551 * partial buffer (so far) */
2518
2519 /*
2520 * If the record is greater than space available return
2521 * partial buffer (so far)
2522 */
2523 if (width >= len)
2524 break; 2552 break;
2525 2553
2526 error = copy_to_user(buf + cnt, tbuf, width); 2554 error = copy_to_user(buf + cnt, tbuf, width);
@@ -2529,6 +2557,8 @@ static ssize_t spufs_switch_log_read(struct file *file, char __user *buf,
2529 cnt += width; 2557 cnt += width;
2530 } 2558 }
2531 2559
2560 spu_release(ctx);
2561
2532 return cnt == 0 ? error : cnt; 2562 return cnt == 0 ? error : cnt;
2533} 2563}
2534 2564
@@ -2537,29 +2567,41 @@ static unsigned int spufs_switch_log_poll(struct file *file, poll_table *wait)
2537 struct inode *inode = file->f_path.dentry->d_inode; 2567 struct inode *inode = file->f_path.dentry->d_inode;
2538 struct spu_context *ctx = SPUFS_I(inode)->i_ctx; 2568 struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
2539 unsigned int mask = 0; 2569 unsigned int mask = 0;
2570 int rc;
2540 2571
2541 poll_wait(file, &ctx->switch_log->wait, wait); 2572 poll_wait(file, &ctx->switch_log->wait, wait);
2542 2573
2574 rc = spu_acquire(ctx);
2575 if (rc)
2576 return rc;
2577
2543 if (spufs_switch_log_used(ctx) > 0) 2578 if (spufs_switch_log_used(ctx) > 0)
2544 mask |= POLLIN; 2579 mask |= POLLIN;
2545 2580
2581 spu_release(ctx);
2582
2546 return mask; 2583 return mask;
2547} 2584}
2548 2585
2549static const struct file_operations spufs_switch_log_fops = { 2586static const struct file_operations spufs_switch_log_fops = {
2550 .owner = THIS_MODULE, 2587 .owner = THIS_MODULE,
2551 .open = spufs_switch_log_open, 2588 .open = spufs_switch_log_open,
2552 .read = spufs_switch_log_read, 2589 .read = spufs_switch_log_read,
2553 .poll = spufs_switch_log_poll, 2590 .poll = spufs_switch_log_poll,
2591 .release = spufs_switch_log_release,
2554}; 2592};
2555 2593
2594/**
2595 * Log a context switch event to a switch log reader.
2596 *
2597 * Must be called with ctx->state_mutex held.
2598 */
2556void spu_switch_log_notify(struct spu *spu, struct spu_context *ctx, 2599void spu_switch_log_notify(struct spu *spu, struct spu_context *ctx,
2557 u32 type, u32 val) 2600 u32 type, u32 val)
2558{ 2601{
2559 if (!ctx->switch_log) 2602 if (!ctx->switch_log)
2560 return; 2603 return;
2561 2604
2562 spin_lock(&ctx->switch_log->lock);
2563 if (spufs_switch_log_avail(ctx) > 1) { 2605 if (spufs_switch_log_avail(ctx) > 1) {
2564 struct switch_log_entry *p; 2606 struct switch_log_entry *p;
2565 2607
@@ -2573,7 +2615,6 @@ void spu_switch_log_notify(struct spu *spu, struct spu_context *ctx,
2573 ctx->switch_log->head = 2615 ctx->switch_log->head =
2574 (ctx->switch_log->head + 1) % SWITCH_LOG_BUFSIZE; 2616 (ctx->switch_log->head + 1) % SWITCH_LOG_BUFSIZE;
2575 } 2617 }
2576 spin_unlock(&ctx->switch_log->lock);
2577 2618
2578 wake_up(&ctx->switch_log->wait); 2619 wake_up(&ctx->switch_log->wait);
2579} 2620}
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index c9bb7cfd3dca..c58bd36b0c5b 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -249,6 +249,7 @@ static int spu_run_fini(struct spu_context *ctx, u32 *npc,
249 249
250 spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED); 250 spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
251 clear_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags); 251 clear_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags);
252 spu_switch_log_notify(NULL, ctx, SWITCH_LOG_EXIT, *status);
252 spu_release(ctx); 253 spu_release(ctx);
253 254
254 if (signal_pending(current)) 255 if (signal_pending(current))
@@ -417,8 +418,6 @@ long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
417 ret = spu_run_fini(ctx, npc, &status); 418 ret = spu_run_fini(ctx, npc, &status);
418 spu_yield(ctx); 419 spu_yield(ctx);
419 420
420 spu_switch_log_notify(NULL, ctx, SWITCH_LOG_EXIT, status);
421
422 if ((status & SPU_STATUS_STOPPED_BY_STOP) && 421 if ((status & SPU_STATUS_STOPPED_BY_STOP) &&
423 (((status >> SPU_STOP_STATUS_SHIFT) & 0x3f00) == 0x2100)) 422 (((status >> SPU_STOP_STATUS_SHIFT) & 0x3f00) == 0x2100))
424 ctx->stats.libassist++; 423 ctx->stats.libassist++;
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 67595bc380dc..2ad914c47493 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -312,6 +312,15 @@ static struct spu *aff_ref_location(struct spu_context *ctx, int mem_aff,
312 */ 312 */
313 node = cpu_to_node(raw_smp_processor_id()); 313 node = cpu_to_node(raw_smp_processor_id());
314 for (n = 0; n < MAX_NUMNODES; n++, node++) { 314 for (n = 0; n < MAX_NUMNODES; n++, node++) {
315 /*
316 * "available_spus" counts how many spus are not potentially
317 * going to be used by other affinity gangs whose reference
318 * context is already in place. Although this code seeks to
319 * avoid having affinity gangs with a summed amount of
320 * contexts bigger than the amount of spus in the node,
321 * this may happen sporadically. In this case, available_spus
322 * becomes negative, which is harmless.
323 */
315 int available_spus; 324 int available_spus;
316 325
317 node = (node < MAX_NUMNODES) ? node : 0; 326 node = (node < MAX_NUMNODES) ? node : 0;
@@ -321,12 +330,10 @@ static struct spu *aff_ref_location(struct spu_context *ctx, int mem_aff,
321 available_spus = 0; 330 available_spus = 0;
322 mutex_lock(&cbe_spu_info[node].list_mutex); 331 mutex_lock(&cbe_spu_info[node].list_mutex);
323 list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) { 332 list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
324 if (spu->ctx && spu->ctx->gang 333 if (spu->ctx && spu->ctx->gang && !spu->ctx->aff_offset
325 && spu->ctx->aff_offset == 0) 334 && spu->ctx->gang->aff_ref_spu)
326 available_spus -= 335 available_spus -= spu->ctx->gang->contexts;
327 (spu->ctx->gang->contexts - 1); 336 available_spus++;
328 else
329 available_spus++;
330 } 337 }
331 if (available_spus < ctx->gang->contexts) { 338 if (available_spus < ctx->gang->contexts) {
332 mutex_unlock(&cbe_spu_info[node].list_mutex); 339 mutex_unlock(&cbe_spu_info[node].list_mutex);
@@ -437,6 +444,11 @@ static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
437 atomic_dec(&cbe_spu_info[spu->node].reserved_spus); 444 atomic_dec(&cbe_spu_info[spu->node].reserved_spus);
438 445
439 if (ctx->gang) 446 if (ctx->gang)
447 /*
448 * If ctx->gang->aff_sched_count is positive, SPU affinity is
449 * being considered in this gang. Using atomic_dec_if_positive
450 * allow us to skip an explicit check for affinity in this gang
451 */
440 atomic_dec_if_positive(&ctx->gang->aff_sched_count); 452 atomic_dec_if_positive(&ctx->gang->aff_sched_count);
441 453
442 spu_switch_notify(spu, NULL); 454 spu_switch_notify(spu, NULL);
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 8ae8ef9dfc22..15c62d3ca129 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -65,7 +65,6 @@ enum {
65}; 65};
66 66
67struct switch_log { 67struct switch_log {
68 spinlock_t lock;
69 wait_queue_head_t wait; 68 wait_queue_head_t wait;
70 unsigned long head; 69 unsigned long head;
71 unsigned long tail; 70 unsigned long tail;
diff --git a/arch/powerpc/platforms/cell/spufs/sputrace.c b/arch/powerpc/platforms/cell/spufs/sputrace.c
index 2ece399f2862..d0b1f3f4d9c8 100644
--- a/arch/powerpc/platforms/cell/spufs/sputrace.c
+++ b/arch/powerpc/platforms/cell/spufs/sputrace.c
@@ -40,6 +40,7 @@ static DECLARE_WAIT_QUEUE_HEAD(sputrace_wait);
40static ktime_t sputrace_start; 40static ktime_t sputrace_start;
41static unsigned long sputrace_head, sputrace_tail; 41static unsigned long sputrace_head, sputrace_tail;
42static struct sputrace *sputrace_log; 42static struct sputrace *sputrace_log;
43static int sputrace_logging;
43 44
44static int sputrace_used(void) 45static int sputrace_used(void)
45{ 46{
@@ -79,6 +80,11 @@ static ssize_t sputrace_read(struct file *file, char __user *buf,
79 char tbuf[128]; 80 char tbuf[128];
80 int width; 81 int width;
81 82
83 /* If we have data ready to return, don't block waiting
84 * for more */
85 if (cnt > 0 && sputrace_used() == 0)
86 break;
87
82 error = wait_event_interruptible(sputrace_wait, 88 error = wait_event_interruptible(sputrace_wait,
83 sputrace_used() > 0); 89 sputrace_used() > 0);
84 if (error) 90 if (error)
@@ -109,24 +115,49 @@ static ssize_t sputrace_read(struct file *file, char __user *buf,
109 115
110static int sputrace_open(struct inode *inode, struct file *file) 116static int sputrace_open(struct inode *inode, struct file *file)
111{ 117{
118 int rc;
119
112 spin_lock(&sputrace_lock); 120 spin_lock(&sputrace_lock);
121 if (sputrace_logging) {
122 rc = -EBUSY;
123 goto out;
124 }
125
126 sputrace_logging = 1;
113 sputrace_head = sputrace_tail = 0; 127 sputrace_head = sputrace_tail = 0;
114 sputrace_start = ktime_get(); 128 sputrace_start = ktime_get();
129 rc = 0;
130
131out:
115 spin_unlock(&sputrace_lock); 132 spin_unlock(&sputrace_lock);
133 return rc;
134}
116 135
136static int sputrace_release(struct inode *inode, struct file *file)
137{
138 spin_lock(&sputrace_lock);
139 sputrace_logging = 0;
140 spin_unlock(&sputrace_lock);
117 return 0; 141 return 0;
118} 142}
119 143
120static const struct file_operations sputrace_fops = { 144static const struct file_operations sputrace_fops = {
121 .owner = THIS_MODULE, 145 .owner = THIS_MODULE,
122 .open = sputrace_open, 146 .open = sputrace_open,
123 .read = sputrace_read, 147 .read = sputrace_read,
148 .release = sputrace_release,
124}; 149};
125 150
126static void sputrace_log_item(const char *name, struct spu_context *ctx, 151static void sputrace_log_item(const char *name, struct spu_context *ctx,
127 struct spu *spu) 152 struct spu *spu)
128{ 153{
129 spin_lock(&sputrace_lock); 154 spin_lock(&sputrace_lock);
155
156 if (!sputrace_logging) {
157 spin_unlock(&sputrace_lock);
158 return;
159 }
160
130 if (sputrace_avail() > 1) { 161 if (sputrace_avail() > 1) {
131 struct sputrace *t = sputrace_log + sputrace_head; 162 struct sputrace *t = sputrace_log + sputrace_head;
132 163
diff --git a/arch/powerpc/platforms/embedded6xx/c2k.c b/arch/powerpc/platforms/embedded6xx/c2k.c
index d0b25b8c39d1..32ba0fa0ad03 100644
--- a/arch/powerpc/platforms/embedded6xx/c2k.c
+++ b/arch/powerpc/platforms/embedded6xx/c2k.c
@@ -116,10 +116,7 @@ static void c2k_restart(char *cmd)
116 116
117void c2k_show_cpuinfo(struct seq_file *m) 117void c2k_show_cpuinfo(struct seq_file *m)
118{ 118{
119 uint memsize = total_memory;
120
121 seq_printf(m, "Vendor\t\t: GEFanuc\n"); 119 seq_printf(m, "Vendor\t\t: GEFanuc\n");
122 seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024));
123 seq_printf(m, "coherency\t: %s\n", COHERENCY_SETTING); 120 seq_printf(m, "coherency\t: %s\n", COHERENCY_SETTING);
124} 121}
125 122
diff --git a/arch/powerpc/platforms/embedded6xx/prpmc2800.c b/arch/powerpc/platforms/embedded6xx/prpmc2800.c
index 5a19b9a1457c..4c485e984236 100644
--- a/arch/powerpc/platforms/embedded6xx/prpmc2800.c
+++ b/arch/powerpc/platforms/embedded6xx/prpmc2800.c
@@ -119,10 +119,7 @@ static void prpmc2800_restart(char *cmd)
119 119
120void prpmc2800_show_cpuinfo(struct seq_file *m) 120void prpmc2800_show_cpuinfo(struct seq_file *m)
121{ 121{
122 uint memsize = total_memory;
123
124 seq_printf(m, "Vendor\t\t: Motorola\n"); 122 seq_printf(m, "Vendor\t\t: Motorola\n");
125 seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024));
126 seq_printf(m, "coherency\t: %s\n", PPRPM2800_COHERENCY_SETTING); 123 seq_printf(m, "coherency\t: %s\n", PPRPM2800_COHERENCY_SETTING);
127} 124}
128 125
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index 140d02a5232a..a623ad256e9e 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -22,6 +22,12 @@ static int pseries_remove_lmb(unsigned long base, unsigned int lmb_size)
22 int ret; 22 int ret;
23 23
24 start_pfn = base >> PAGE_SHIFT; 24 start_pfn = base >> PAGE_SHIFT;
25
26 if (!pfn_valid(start_pfn)) {
27 lmb_remove(base, lmb_size);
28 return 0;
29 }
30
25 zone = page_zone(pfn_to_page(start_pfn)); 31 zone = page_zone(pfn_to_page(start_pfn));
26 32
27 /* 33 /*
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index a8c446697f9e..d56491d182d3 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -44,6 +44,7 @@
44#include <asm/tce.h> 44#include <asm/tce.h>
45#include <asm/ppc-pci.h> 45#include <asm/ppc-pci.h>
46#include <asm/udbg.h> 46#include <asm/udbg.h>
47#include <asm/kdump.h>
47 48
48#include "plpar_wrappers.h" 49#include "plpar_wrappers.h"
49 50
@@ -291,9 +292,8 @@ static void iommu_table_setparms(struct pci_controller *phb,
291 292
292 tbl->it_base = (unsigned long)__va(*basep); 293 tbl->it_base = (unsigned long)__va(*basep);
293 294
294#ifndef CONFIG_CRASH_DUMP 295 if (!__kdump_flag)
295 memset((void *)tbl->it_base, 0, *sizep); 296 memset((void *)tbl->it_base, 0, *sizep);
296#endif
297 297
298 tbl->it_busno = phb->bus->number; 298 tbl->it_busno = phb->bus->number;
299 299
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index e00f96baa381..1a231c389ba0 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -52,8 +52,8 @@
52 52
53 53
54/* 54/*
55 * The primary thread of each non-boot processor is recorded here before 55 * The Primary thread of each non-boot processor was started from the OF client
56 * smp init. 56 * interface by prom_hold_cpus and is spinning on secondary_hold_spinloop.
57 */ 57 */
58static cpumask_t of_spin_map; 58static cpumask_t of_spin_map;
59 59
@@ -161,8 +161,7 @@ static void __devinit smp_pSeries_kick_cpu(int nr)
161static int smp_pSeries_cpu_bootable(unsigned int nr) 161static int smp_pSeries_cpu_bootable(unsigned int nr)
162{ 162{
163 /* Special case - we inhibit secondary thread startup 163 /* Special case - we inhibit secondary thread startup
164 * during boot if the user requests it. Odd-numbered 164 * during boot if the user requests it.
165 * cpus are assumed to be secondary threads.
166 */ 165 */
167 if (system_state < SYSTEM_RUNNING && 166 if (system_state < SYSTEM_RUNNING &&
168 cpu_has_feature(CPU_FTR_SMT) && 167 cpu_has_feature(CPU_FTR_SMT) &&
@@ -199,11 +198,7 @@ static void __init smp_init_pseries(void)
199 /* Mark threads which are still spinning in hold loops. */ 198 /* Mark threads which are still spinning in hold loops. */
200 if (cpu_has_feature(CPU_FTR_SMT)) { 199 if (cpu_has_feature(CPU_FTR_SMT)) {
201 for_each_present_cpu(i) { 200 for_each_present_cpu(i) {
202 if (i % 2 == 0) 201 if (cpu_thread_in_core(i) == 0)
203 /*
204 * Even-numbered logical cpus correspond to
205 * primary threads.
206 */
207 cpu_set(i, of_spin_map); 202 cpu_set(i, of_spin_map);
208 } 203 }
209 } else { 204 } else {
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
index a44709a94f97..5afce115ab1f 100644
--- a/arch/powerpc/sysdev/Makefile
+++ b/arch/powerpc/sysdev/Makefile
@@ -37,6 +37,7 @@ obj-$(CONFIG_OF_RTC) += of_rtc.o
37ifeq ($(CONFIG_PCI),y) 37ifeq ($(CONFIG_PCI),y)
38obj-$(CONFIG_4xx) += ppc4xx_pci.o 38obj-$(CONFIG_4xx) += ppc4xx_pci.o
39endif 39endif
40obj-$(CONFIG_PPC4xx_GPIO) += ppc4xx_gpio.o
40 41
41obj-$(CONFIG_CPM) += cpm_common.o 42obj-$(CONFIG_CPM) += cpm_common.o
42obj-$(CONFIG_CPM2) += cpm2.o cpm2_pic.o 43obj-$(CONFIG_CPM2) += cpm2.o cpm2_pic.o
diff --git a/arch/powerpc/sysdev/ppc4xx_gpio.c b/arch/powerpc/sysdev/ppc4xx_gpio.c
new file mode 100644
index 000000000000..110efe2a54fc
--- /dev/null
+++ b/arch/powerpc/sysdev/ppc4xx_gpio.c
@@ -0,0 +1,217 @@
1/*
2 * PPC4xx gpio driver
3 *
4 * Copyright (c) 2008 Harris Corporation
5 * Copyright (c) 2008 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
6 * Copyright (c) MontaVista Software, Inc. 2008.
7 *
8 * Author: Steve Falco <sfalco@harris.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2
12 * as published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 */
23
24#include <linux/kernel.h>
25#include <linux/init.h>
26#include <linux/spinlock.h>
27#include <linux/io.h>
28#include <linux/of.h>
29#include <linux/of_gpio.h>
30#include <linux/gpio.h>
31#include <linux/types.h>
32
33#define GPIO_MASK(gpio) (0x80000000 >> (gpio))
34#define GPIO_MASK2(gpio) (0xc0000000 >> ((gpio) * 2))
35
36/* Physical GPIO register layout */
37struct ppc4xx_gpio {
38 __be32 or;
39 __be32 tcr;
40 __be32 osrl;
41 __be32 osrh;
42 __be32 tsrl;
43 __be32 tsrh;
44 __be32 odr;
45 __be32 ir;
46 __be32 rr1;
47 __be32 rr2;
48 __be32 rr3;
49 __be32 reserved1;
50 __be32 isr1l;
51 __be32 isr1h;
52 __be32 isr2l;
53 __be32 isr2h;
54 __be32 isr3l;
55 __be32 isr3h;
56};
57
58struct ppc4xx_gpio_chip {
59 struct of_mm_gpio_chip mm_gc;
60 spinlock_t lock;
61};
62
63/*
64 * GPIO LIB API implementation for GPIOs
65 *
66 * There are a maximum of 32 gpios in each gpio controller.
67 */
68
69static inline struct ppc4xx_gpio_chip *
70to_ppc4xx_gpiochip(struct of_mm_gpio_chip *mm_gc)
71{
72 return container_of(mm_gc, struct ppc4xx_gpio_chip, mm_gc);
73}
74
75static int ppc4xx_gpio_get(struct gpio_chip *gc, unsigned int gpio)
76{
77 struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
78 struct ppc4xx_gpio __iomem *regs = mm_gc->regs;
79
80 return in_be32(&regs->ir) & GPIO_MASK(gpio);
81}
82
83static inline void
84__ppc4xx_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
85{
86 struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
87 struct ppc4xx_gpio __iomem *regs = mm_gc->regs;
88
89 if (val)
90 setbits32(&regs->or, GPIO_MASK(gpio));
91 else
92 clrbits32(&regs->or, GPIO_MASK(gpio));
93}
94
95static void
96ppc4xx_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
97{
98 struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
99 struct ppc4xx_gpio_chip *chip = to_ppc4xx_gpiochip(mm_gc);
100 unsigned long flags;
101
102 spin_lock_irqsave(&chip->lock, flags);
103
104 __ppc4xx_gpio_set(gc, gpio, val);
105
106 spin_unlock_irqrestore(&chip->lock, flags);
107
108 pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val);
109}
110
111static int ppc4xx_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
112{
113 struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
114 struct ppc4xx_gpio_chip *chip = to_ppc4xx_gpiochip(mm_gc);
115 struct ppc4xx_gpio __iomem *regs = mm_gc->regs;
116 unsigned long flags;
117
118 spin_lock_irqsave(&chip->lock, flags);
119
120 /* Disable open-drain function */
121 clrbits32(&regs->odr, GPIO_MASK(gpio));
122
123 /* Float the pin */
124 clrbits32(&regs->tcr, GPIO_MASK(gpio));
125
126 /* Bits 0-15 use TSRL/OSRL, bits 16-31 use TSRH/OSRH */
127 if (gpio < 16) {
128 clrbits32(&regs->osrl, GPIO_MASK2(gpio));
129 clrbits32(&regs->tsrl, GPIO_MASK2(gpio));
130 } else {
131 clrbits32(&regs->osrh, GPIO_MASK2(gpio));
132 clrbits32(&regs->tsrh, GPIO_MASK2(gpio));
133 }
134
135 spin_unlock_irqrestore(&chip->lock, flags);
136
137 return 0;
138}
139
140static int
141ppc4xx_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
142{
143 struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
144 struct ppc4xx_gpio_chip *chip = to_ppc4xx_gpiochip(mm_gc);
145 struct ppc4xx_gpio __iomem *regs = mm_gc->regs;
146 unsigned long flags;
147
148 spin_lock_irqsave(&chip->lock, flags);
149
150 /* First set initial value */
151 __ppc4xx_gpio_set(gc, gpio, val);
152
153 /* Disable open-drain function */
154 clrbits32(&regs->odr, GPIO_MASK(gpio));
155
156 /* Drive the pin */
157 setbits32(&regs->tcr, GPIO_MASK(gpio));
158
159 /* Bits 0-15 use TSRL, bits 16-31 use TSRH */
160 if (gpio < 16) {
161 clrbits32(&regs->osrl, GPIO_MASK2(gpio));
162 clrbits32(&regs->tsrl, GPIO_MASK2(gpio));
163 } else {
164 clrbits32(&regs->osrh, GPIO_MASK2(gpio));
165 clrbits32(&regs->tsrh, GPIO_MASK2(gpio));
166 }
167
168 spin_unlock_irqrestore(&chip->lock, flags);
169
170 pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val);
171
172 return 0;
173}
174
175static int __init ppc4xx_add_gpiochips(void)
176{
177 struct device_node *np;
178
179 for_each_compatible_node(np, NULL, "ibm,ppc4xx-gpio") {
180 int ret;
181 struct ppc4xx_gpio_chip *ppc4xx_gc;
182 struct of_mm_gpio_chip *mm_gc;
183 struct of_gpio_chip *of_gc;
184 struct gpio_chip *gc;
185
186 ppc4xx_gc = kzalloc(sizeof(*ppc4xx_gc), GFP_KERNEL);
187 if (!ppc4xx_gc) {
188 ret = -ENOMEM;
189 goto err;
190 }
191
192 spin_lock_init(&ppc4xx_gc->lock);
193
194 mm_gc = &ppc4xx_gc->mm_gc;
195 of_gc = &mm_gc->of_gc;
196 gc = &of_gc->gc;
197
198 of_gc->gpio_cells = 2;
199 gc->ngpio = 32;
200 gc->direction_input = ppc4xx_gpio_dir_in;
201 gc->direction_output = ppc4xx_gpio_dir_out;
202 gc->get = ppc4xx_gpio_get;
203 gc->set = ppc4xx_gpio_set;
204
205 ret = of_mm_gpiochip_add(np, mm_gc);
206 if (ret)
207 goto err;
208 continue;
209err:
210 pr_err("%s: registration failed with status %d\n",
211 np->full_name, ret);
212 kfree(ppc4xx_gc);
213 /* try others anyway */
214 }
215 return 0;
216}
217arch_initcall(ppc4xx_add_gpiochips);
diff --git a/arch/sparc/oprofile/init.c b/arch/sparc/oprofile/init.c
index 9ab815b95b5a..17bb6035069b 100644
--- a/arch/sparc/oprofile/init.c
+++ b/arch/sparc/oprofile/init.c
@@ -12,7 +12,7 @@
12#include <linux/errno.h> 12#include <linux/errno.h>
13#include <linux/init.h> 13#include <linux/init.h>
14 14
15int __init oprofile_arch_init(struct oprofile_operations * ops) 15int __init oprofile_arch_init(struct oprofile_operations *ops)
16{ 16{
17 return -ENODEV; 17 return -ENODEV;
18} 18}
diff --git a/arch/sparc64/oprofile/init.c b/arch/sparc64/oprofile/init.c
index 9ab815b95b5a..17bb6035069b 100644
--- a/arch/sparc64/oprofile/init.c
+++ b/arch/sparc64/oprofile/init.c
@@ -12,7 +12,7 @@
12#include <linux/errno.h> 12#include <linux/errno.h>
13#include <linux/init.h> 13#include <linux/init.h>
14 14
15int __init oprofile_arch_init(struct oprofile_operations * ops) 15int __init oprofile_arch_init(struct oprofile_operations *ops)
16{ 16{
17 return -ENODEV; 17 return -ENODEV;
18} 18}
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 4cd8083c58be..0cdcda35a05f 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -212,7 +212,7 @@ static void __init iommu_set_exclusion_range(struct amd_iommu *iommu)
212/* Programs the physical address of the device table into the IOMMU hardware */ 212/* Programs the physical address of the device table into the IOMMU hardware */
213static void __init iommu_set_device_table(struct amd_iommu *iommu) 213static void __init iommu_set_device_table(struct amd_iommu *iommu)
214{ 214{
215 u32 entry; 215 u64 entry;
216 216
217 BUG_ON(iommu->mmio_base == NULL); 217 BUG_ON(iommu->mmio_base == NULL);
218 218
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index ccf6c503fc3b..d1d4dc52f649 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -36,7 +36,7 @@ void ack_bad_irq(unsigned int irq)
36} 36}
37 37
38#ifdef CONFIG_X86_32 38#ifdef CONFIG_X86_32
39# define irq_stats(x) (&per_cpu(irq_stat,x)) 39# define irq_stats(x) (&per_cpu(irq_stat, x))
40#else 40#else
41# define irq_stats(x) cpu_pda(x) 41# define irq_stats(x) cpu_pda(x)
42#endif 42#endif
@@ -113,7 +113,7 @@ int show_interrupts(struct seq_file *p, void *v)
113 if (i == 0) { 113 if (i == 0) {
114 seq_printf(p, " "); 114 seq_printf(p, " ");
115 for_each_online_cpu(j) 115 for_each_online_cpu(j)
116 seq_printf(p, "CPU%-8d",j); 116 seq_printf(p, "CPU%-8d", j);
117 seq_putc(p, '\n'); 117 seq_putc(p, '\n');
118 } 118 }
119 119
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 192624820217..1972266e8ba5 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -9,8 +9,6 @@
9#include <asm/calgary.h> 9#include <asm/calgary.h>
10#include <asm/amd_iommu.h> 10#include <asm/amd_iommu.h>
11 11
12static int forbid_dac __read_mostly;
13
14struct dma_mapping_ops *dma_ops; 12struct dma_mapping_ops *dma_ops;
15EXPORT_SYMBOL(dma_ops); 13EXPORT_SYMBOL(dma_ops);
16 14
@@ -293,17 +291,3 @@ void pci_iommu_shutdown(void)
293} 291}
294/* Must execute after PCI subsystem */ 292/* Must execute after PCI subsystem */
295fs_initcall(pci_iommu_init); 293fs_initcall(pci_iommu_init);
296
297#ifdef CONFIG_PCI
298/* Many VIA bridges seem to corrupt data for DAC. Disable it here */
299
300static __devinit void via_no_dac(struct pci_dev *dev)
301{
302 if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
303 printk(KERN_INFO "PCI: VIA PCI bridge detected."
304 "Disabling DAC.\n");
305 forbid_dac = 1;
306 }
307}
308DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
309#endif
diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
index e2095cba409f..04df67f8a7ba 100644
--- a/arch/x86/oprofile/backtrace.c
+++ b/arch/x86/oprofile/backtrace.c
@@ -52,8 +52,7 @@ struct frame_head {
52 unsigned long ret; 52 unsigned long ret;
53} __attribute__((packed)); 53} __attribute__((packed));
54 54
55static struct frame_head * 55static struct frame_head *dump_user_backtrace(struct frame_head *head)
56dump_user_backtrace(struct frame_head * head)
57{ 56{
58 struct frame_head bufhead[2]; 57 struct frame_head bufhead[2];
59 58
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 57f6c9088081..022cd41ea9b4 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -28,85 +28,9 @@ static struct op_x86_model_spec const *model;
28static DEFINE_PER_CPU(struct op_msrs, cpu_msrs); 28static DEFINE_PER_CPU(struct op_msrs, cpu_msrs);
29static DEFINE_PER_CPU(unsigned long, saved_lvtpc); 29static DEFINE_PER_CPU(unsigned long, saved_lvtpc);
30 30
31static int nmi_start(void);
32static void nmi_stop(void);
33static void nmi_cpu_start(void *dummy);
34static void nmi_cpu_stop(void *dummy);
35
36/* 0 == registered but off, 1 == registered and on */ 31/* 0 == registered but off, 1 == registered and on */
37static int nmi_enabled = 0; 32static int nmi_enabled = 0;
38 33
39#ifdef CONFIG_SMP
40static int oprofile_cpu_notifier(struct notifier_block *b, unsigned long action,
41 void *data)
42{
43 int cpu = (unsigned long)data;
44 switch (action) {
45 case CPU_DOWN_FAILED:
46 case CPU_ONLINE:
47 smp_call_function_single(cpu, nmi_cpu_start, NULL, 0);
48 break;
49 case CPU_DOWN_PREPARE:
50 smp_call_function_single(cpu, nmi_cpu_stop, NULL, 1);
51 break;
52 }
53 return NOTIFY_DONE;
54}
55
56static struct notifier_block oprofile_cpu_nb = {
57 .notifier_call = oprofile_cpu_notifier
58};
59#endif
60
61#ifdef CONFIG_PM
62
63static int nmi_suspend(struct sys_device *dev, pm_message_t state)
64{
65 /* Only one CPU left, just stop that one */
66 if (nmi_enabled == 1)
67 nmi_cpu_stop(NULL);
68 return 0;
69}
70
71static int nmi_resume(struct sys_device *dev)
72{
73 if (nmi_enabled == 1)
74 nmi_cpu_start(NULL);
75 return 0;
76}
77
78static struct sysdev_class oprofile_sysclass = {
79 .name = "oprofile",
80 .resume = nmi_resume,
81 .suspend = nmi_suspend,
82};
83
84static struct sys_device device_oprofile = {
85 .id = 0,
86 .cls = &oprofile_sysclass,
87};
88
89static int __init init_sysfs(void)
90{
91 int error;
92
93 error = sysdev_class_register(&oprofile_sysclass);
94 if (!error)
95 error = sysdev_register(&device_oprofile);
96 return error;
97}
98
99static void exit_sysfs(void)
100{
101 sysdev_unregister(&device_oprofile);
102 sysdev_class_unregister(&oprofile_sysclass);
103}
104
105#else
106#define init_sysfs() do { } while (0)
107#define exit_sysfs() do { } while (0)
108#endif /* CONFIG_PM */
109
110static int profile_exceptions_notify(struct notifier_block *self, 34static int profile_exceptions_notify(struct notifier_block *self,
111 unsigned long val, void *data) 35 unsigned long val, void *data)
112{ 36{
@@ -361,6 +285,77 @@ static int nmi_create_files(struct super_block *sb, struct dentry *root)
361 return 0; 285 return 0;
362} 286}
363 287
288#ifdef CONFIG_SMP
289static int oprofile_cpu_notifier(struct notifier_block *b, unsigned long action,
290 void *data)
291{
292 int cpu = (unsigned long)data;
293 switch (action) {
294 case CPU_DOWN_FAILED:
295 case CPU_ONLINE:
296 smp_call_function_single(cpu, nmi_cpu_start, NULL, 0);
297 break;
298 case CPU_DOWN_PREPARE:
299 smp_call_function_single(cpu, nmi_cpu_stop, NULL, 1);
300 break;
301 }
302 return NOTIFY_DONE;
303}
304
305static struct notifier_block oprofile_cpu_nb = {
306 .notifier_call = oprofile_cpu_notifier
307};
308#endif
309
310#ifdef CONFIG_PM
311
312static int nmi_suspend(struct sys_device *dev, pm_message_t state)
313{
314 /* Only one CPU left, just stop that one */
315 if (nmi_enabled == 1)
316 nmi_cpu_stop(NULL);
317 return 0;
318}
319
320static int nmi_resume(struct sys_device *dev)
321{
322 if (nmi_enabled == 1)
323 nmi_cpu_start(NULL);
324 return 0;
325}
326
327static struct sysdev_class oprofile_sysclass = {
328 .name = "oprofile",
329 .resume = nmi_resume,
330 .suspend = nmi_suspend,
331};
332
333static struct sys_device device_oprofile = {
334 .id = 0,
335 .cls = &oprofile_sysclass,
336};
337
338static int __init init_sysfs(void)
339{
340 int error;
341
342 error = sysdev_class_register(&oprofile_sysclass);
343 if (!error)
344 error = sysdev_register(&device_oprofile);
345 return error;
346}
347
348static void exit_sysfs(void)
349{
350 sysdev_unregister(&device_oprofile);
351 sysdev_class_unregister(&oprofile_sysclass);
352}
353
354#else
355#define init_sysfs() do { } while (0)
356#define exit_sysfs() do { } while (0)
357#endif /* CONFIG_PM */
358
364static int p4force; 359static int p4force;
365module_param(p4force, int, 0); 360module_param(p4force, int, 0);
366 361
@@ -420,9 +415,6 @@ static int __init ppro_init(char **cpu_type)
420 case 15: case 23: 415 case 15: case 23:
421 *cpu_type = "i386/core_2"; 416 *cpu_type = "i386/core_2";
422 break; 417 break;
423 case 26:
424 *cpu_type = "i386/core_2";
425 break;
426 default: 418 default:
427 /* Unknown */ 419 /* Unknown */
428 return 0; 420 return 0;
@@ -432,6 +424,16 @@ static int __init ppro_init(char **cpu_type)
432 return 1; 424 return 1;
433} 425}
434 426
427static int __init arch_perfmon_init(char **cpu_type)
428{
429 if (!cpu_has_arch_perfmon)
430 return 0;
431 *cpu_type = "i386/arch_perfmon";
432 model = &op_arch_perfmon_spec;
433 arch_perfmon_setup_counters();
434 return 1;
435}
436
435/* in order to get sysfs right */ 437/* in order to get sysfs right */
436static int using_nmi; 438static int using_nmi;
437 439
@@ -439,7 +441,7 @@ int __init op_nmi_init(struct oprofile_operations *ops)
439{ 441{
440 __u8 vendor = boot_cpu_data.x86_vendor; 442 __u8 vendor = boot_cpu_data.x86_vendor;
441 __u8 family = boot_cpu_data.x86; 443 __u8 family = boot_cpu_data.x86;
442 char *cpu_type; 444 char *cpu_type = NULL;
443 int ret = 0; 445 int ret = 0;
444 446
445 if (!cpu_has_apic) 447 if (!cpu_has_apic)
@@ -477,19 +479,20 @@ int __init op_nmi_init(struct oprofile_operations *ops)
477 switch (family) { 479 switch (family) {
478 /* Pentium IV */ 480 /* Pentium IV */
479 case 0xf: 481 case 0xf:
480 if (!p4_init(&cpu_type)) 482 p4_init(&cpu_type);
481 return -ENODEV;
482 break; 483 break;
483 484
484 /* A P6-class processor */ 485 /* A P6-class processor */
485 case 6: 486 case 6:
486 if (!ppro_init(&cpu_type)) 487 ppro_init(&cpu_type);
487 return -ENODEV;
488 break; 488 break;
489 489
490 default: 490 default:
491 return -ENODEV; 491 break;
492 } 492 }
493
494 if (!cpu_type && !arch_perfmon_init(&cpu_type))
495 return -ENODEV;
493 break; 496 break;
494 497
495 default: 498 default:
diff --git a/arch/x86/oprofile/op_counter.h b/arch/x86/oprofile/op_counter.h
index 2880b15c4675..91b6a116165e 100644
--- a/arch/x86/oprofile/op_counter.h
+++ b/arch/x86/oprofile/op_counter.h
@@ -6,22 +6,22 @@
6 * 6 *
7 * @author John Levon 7 * @author John Levon
8 */ 8 */
9 9
10#ifndef OP_COUNTER_H 10#ifndef OP_COUNTER_H
11#define OP_COUNTER_H 11#define OP_COUNTER_H
12 12
13#define OP_MAX_COUNTER 8 13#define OP_MAX_COUNTER 8
14 14
15/* Per-perfctr configuration as set via 15/* Per-perfctr configuration as set via
16 * oprofilefs. 16 * oprofilefs.
17 */ 17 */
18struct op_counter_config { 18struct op_counter_config {
19 unsigned long count; 19 unsigned long count;
20 unsigned long enabled; 20 unsigned long enabled;
21 unsigned long event; 21 unsigned long event;
22 unsigned long kernel; 22 unsigned long kernel;
23 unsigned long user; 23 unsigned long user;
24 unsigned long unit_mask; 24 unsigned long unit_mask;
25}; 25};
26 26
27extern struct op_counter_config counter_config[]; 27extern struct op_counter_config counter_config[];
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
index d9faf607b3a6..509513760a6e 100644
--- a/arch/x86/oprofile/op_model_amd.c
+++ b/arch/x86/oprofile/op_model_amd.c
@@ -67,8 +67,9 @@ static unsigned long reset_value[NUM_COUNTERS];
67 67
68/* The function interface needs to be fixed, something like add 68/* The function interface needs to be fixed, something like add
69 data. Should then be added to linux/oprofile.h. */ 69 data. Should then be added to linux/oprofile.h. */
70extern void oprofile_add_ibs_sample(struct pt_regs *const regs, 70extern void
71 unsigned int * const ibs_sample, u8 code); 71oprofile_add_ibs_sample(struct pt_regs *const regs,
72 unsigned int *const ibs_sample, int ibs_code);
72 73
73struct ibs_fetch_sample { 74struct ibs_fetch_sample {
74 /* MSRC001_1031 IBS Fetch Linear Address Register */ 75 /* MSRC001_1031 IBS Fetch Linear Address Register */
@@ -309,12 +310,15 @@ static void op_amd_start(struct op_msrs const * const msrs)
309#ifdef CONFIG_OPROFILE_IBS 310#ifdef CONFIG_OPROFILE_IBS
310 if (ibs_allowed && ibs_config.fetch_enabled) { 311 if (ibs_allowed && ibs_config.fetch_enabled) {
311 low = (ibs_config.max_cnt_fetch >> 4) & 0xFFFF; 312 low = (ibs_config.max_cnt_fetch >> 4) & 0xFFFF;
312 high = IBS_FETCH_HIGH_ENABLE; 313 high = ((ibs_config.rand_en & 0x1) << 25) /* bit 57 */
314 + IBS_FETCH_HIGH_ENABLE;
313 wrmsr(MSR_AMD64_IBSFETCHCTL, low, high); 315 wrmsr(MSR_AMD64_IBSFETCHCTL, low, high);
314 } 316 }
315 317
316 if (ibs_allowed && ibs_config.op_enabled) { 318 if (ibs_allowed && ibs_config.op_enabled) {
317 low = ((ibs_config.max_cnt_op >> 4) & 0xFFFF) + IBS_OP_LOW_ENABLE; 319 low = ((ibs_config.max_cnt_op >> 4) & 0xFFFF)
320 + ((ibs_config.dispatched_ops & 0x1) << 19) /* bit 19 */
321 + IBS_OP_LOW_ENABLE;
318 high = 0; 322 high = 0;
319 wrmsr(MSR_AMD64_IBSOPCTL, low, high); 323 wrmsr(MSR_AMD64_IBSOPCTL, low, high);
320 } 324 }
@@ -468,11 +472,10 @@ static void clear_ibs_nmi(void)
468 on_each_cpu(apic_clear_ibs_nmi_per_cpu, NULL, 1); 472 on_each_cpu(apic_clear_ibs_nmi_per_cpu, NULL, 1);
469} 473}
470 474
471static int (*create_arch_files)(struct super_block * sb, struct dentry * root); 475static int (*create_arch_files)(struct super_block *sb, struct dentry *root);
472 476
473static int setup_ibs_files(struct super_block * sb, struct dentry * root) 477static int setup_ibs_files(struct super_block *sb, struct dentry *root)
474{ 478{
475 char buf[12];
476 struct dentry *dir; 479 struct dentry *dir;
477 int ret = 0; 480 int ret = 0;
478 481
@@ -494,22 +497,22 @@ static int setup_ibs_files(struct super_block * sb, struct dentry * root)
494 ibs_config.max_cnt_op = 250000; 497 ibs_config.max_cnt_op = 250000;
495 ibs_config.op_enabled = 0; 498 ibs_config.op_enabled = 0;
496 ibs_config.dispatched_ops = 1; 499 ibs_config.dispatched_ops = 1;
497 snprintf(buf, sizeof(buf), "ibs_fetch"); 500
498 dir = oprofilefs_mkdir(sb, root, buf); 501 dir = oprofilefs_mkdir(sb, root, "ibs_fetch");
499 oprofilefs_create_ulong(sb, dir, "rand_enable",
500 &ibs_config.rand_en);
501 oprofilefs_create_ulong(sb, dir, "enable", 502 oprofilefs_create_ulong(sb, dir, "enable",
502 &ibs_config.fetch_enabled); 503 &ibs_config.fetch_enabled);
503 oprofilefs_create_ulong(sb, dir, "max_count", 504 oprofilefs_create_ulong(sb, dir, "max_count",
504 &ibs_config.max_cnt_fetch); 505 &ibs_config.max_cnt_fetch);
505 snprintf(buf, sizeof(buf), "ibs_uops"); 506 oprofilefs_create_ulong(sb, dir, "rand_enable",
506 dir = oprofilefs_mkdir(sb, root, buf); 507 &ibs_config.rand_en);
508
509 dir = oprofilefs_mkdir(sb, root, "ibs_op");
507 oprofilefs_create_ulong(sb, dir, "enable", 510 oprofilefs_create_ulong(sb, dir, "enable",
508 &ibs_config.op_enabled); 511 &ibs_config.op_enabled);
509 oprofilefs_create_ulong(sb, dir, "max_count", 512 oprofilefs_create_ulong(sb, dir, "max_count",
510 &ibs_config.max_cnt_op); 513 &ibs_config.max_cnt_op);
511 oprofilefs_create_ulong(sb, dir, "dispatched_ops", 514 oprofilefs_create_ulong(sb, dir, "dispatched_ops",
512 &ibs_config.dispatched_ops); 515 &ibs_config.dispatched_ops);
513 516
514 return 0; 517 return 0;
515} 518}
@@ -530,14 +533,14 @@ static void op_amd_exit(void)
530#endif 533#endif
531 534
532struct op_x86_model_spec const op_amd_spec = { 535struct op_x86_model_spec const op_amd_spec = {
533 .init = op_amd_init, 536 .init = op_amd_init,
534 .exit = op_amd_exit, 537 .exit = op_amd_exit,
535 .num_counters = NUM_COUNTERS, 538 .num_counters = NUM_COUNTERS,
536 .num_controls = NUM_CONTROLS, 539 .num_controls = NUM_CONTROLS,
537 .fill_in_addresses = &op_amd_fill_in_addresses, 540 .fill_in_addresses = &op_amd_fill_in_addresses,
538 .setup_ctrs = &op_amd_setup_ctrs, 541 .setup_ctrs = &op_amd_setup_ctrs,
539 .check_ctrs = &op_amd_check_ctrs, 542 .check_ctrs = &op_amd_check_ctrs,
540 .start = &op_amd_start, 543 .start = &op_amd_start,
541 .stop = &op_amd_stop, 544 .stop = &op_amd_stop,
542 .shutdown = &op_amd_shutdown 545 .shutdown = &op_amd_shutdown
543}; 546};
diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c
index 43ac5af338d8..4c4a51c90bc2 100644
--- a/arch/x86/oprofile/op_model_p4.c
+++ b/arch/x86/oprofile/op_model_p4.c
@@ -698,24 +698,24 @@ static void p4_shutdown(struct op_msrs const * const msrs)
698 698
699#ifdef CONFIG_SMP 699#ifdef CONFIG_SMP
700struct op_x86_model_spec const op_p4_ht2_spec = { 700struct op_x86_model_spec const op_p4_ht2_spec = {
701 .num_counters = NUM_COUNTERS_HT2, 701 .num_counters = NUM_COUNTERS_HT2,
702 .num_controls = NUM_CONTROLS_HT2, 702 .num_controls = NUM_CONTROLS_HT2,
703 .fill_in_addresses = &p4_fill_in_addresses, 703 .fill_in_addresses = &p4_fill_in_addresses,
704 .setup_ctrs = &p4_setup_ctrs, 704 .setup_ctrs = &p4_setup_ctrs,
705 .check_ctrs = &p4_check_ctrs, 705 .check_ctrs = &p4_check_ctrs,
706 .start = &p4_start, 706 .start = &p4_start,
707 .stop = &p4_stop, 707 .stop = &p4_stop,
708 .shutdown = &p4_shutdown 708 .shutdown = &p4_shutdown
709}; 709};
710#endif 710#endif
711 711
712struct op_x86_model_spec const op_p4_spec = { 712struct op_x86_model_spec const op_p4_spec = {
713 .num_counters = NUM_COUNTERS_NON_HT, 713 .num_counters = NUM_COUNTERS_NON_HT,
714 .num_controls = NUM_CONTROLS_NON_HT, 714 .num_controls = NUM_CONTROLS_NON_HT,
715 .fill_in_addresses = &p4_fill_in_addresses, 715 .fill_in_addresses = &p4_fill_in_addresses,
716 .setup_ctrs = &p4_setup_ctrs, 716 .setup_ctrs = &p4_setup_ctrs,
717 .check_ctrs = &p4_check_ctrs, 717 .check_ctrs = &p4_check_ctrs,
718 .start = &p4_start, 718 .start = &p4_start,
719 .stop = &p4_stop, 719 .stop = &p4_stop,
720 .shutdown = &p4_shutdown 720 .shutdown = &p4_shutdown
721}; 721};
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
index eff431f6c57b..0620d6d45f7d 100644
--- a/arch/x86/oprofile/op_model_ppro.c
+++ b/arch/x86/oprofile/op_model_ppro.c
@@ -1,32 +1,34 @@
1/* 1/*
2 * @file op_model_ppro.h 2 * @file op_model_ppro.h
3 * pentium pro / P6 model-specific MSR operations 3 * Family 6 perfmon and architectural perfmon MSR operations
4 * 4 *
5 * @remark Copyright 2002 OProfile authors 5 * @remark Copyright 2002 OProfile authors
6 * @remark Copyright 2008 Intel Corporation
6 * @remark Read the file COPYING 7 * @remark Read the file COPYING
7 * 8 *
8 * @author John Levon 9 * @author John Levon
9 * @author Philippe Elie 10 * @author Philippe Elie
10 * @author Graydon Hoare 11 * @author Graydon Hoare
12 * @author Andi Kleen
11 */ 13 */
12 14
13#include <linux/oprofile.h> 15#include <linux/oprofile.h>
16#include <linux/slab.h>
14#include <asm/ptrace.h> 17#include <asm/ptrace.h>
15#include <asm/msr.h> 18#include <asm/msr.h>
16#include <asm/apic.h> 19#include <asm/apic.h>
17#include <asm/nmi.h> 20#include <asm/nmi.h>
21#include <asm/intel_arch_perfmon.h>
18 22
19#include "op_x86_model.h" 23#include "op_x86_model.h"
20#include "op_counter.h" 24#include "op_counter.h"
21 25
22#define NUM_COUNTERS 2 26static int num_counters = 2;
23#define NUM_CONTROLS 2 27static int counter_width = 32;
24 28
25#define CTR_IS_RESERVED(msrs, c) (msrs->counters[(c)].addr ? 1 : 0) 29#define CTR_IS_RESERVED(msrs, c) (msrs->counters[(c)].addr ? 1 : 0)
26#define CTR_READ(l, h, msrs, c) do {rdmsr(msrs->counters[(c)].addr, (l), (h)); } while (0) 30#define CTR_READ(l, h, msrs, c) do {rdmsr(msrs->counters[(c)].addr, (l), (h)); } while (0)
27#define CTR_32BIT_WRITE(l, msrs, c) \ 31#define CTR_OVERFLOWED(n) (!((n) & (1U<<(counter_width-1))))
28 do {wrmsr(msrs->counters[(c)].addr, -(u32)(l), 0); } while (0)
29#define CTR_OVERFLOWED(n) (!((n) & (1U<<31)))
30 32
31#define CTRL_IS_RESERVED(msrs, c) (msrs->controls[(c)].addr ? 1 : 0) 33#define CTRL_IS_RESERVED(msrs, c) (msrs->controls[(c)].addr ? 1 : 0)
32#define CTRL_READ(l, h, msrs, c) do {rdmsr((msrs->controls[(c)].addr), (l), (h)); } while (0) 34#define CTRL_READ(l, h, msrs, c) do {rdmsr((msrs->controls[(c)].addr), (l), (h)); } while (0)
@@ -40,20 +42,20 @@
40#define CTRL_SET_UM(val, m) (val |= (m << 8)) 42#define CTRL_SET_UM(val, m) (val |= (m << 8))
41#define CTRL_SET_EVENT(val, e) (val |= e) 43#define CTRL_SET_EVENT(val, e) (val |= e)
42 44
43static unsigned long reset_value[NUM_COUNTERS]; 45static u64 *reset_value;
44 46
45static void ppro_fill_in_addresses(struct op_msrs * const msrs) 47static void ppro_fill_in_addresses(struct op_msrs * const msrs)
46{ 48{
47 int i; 49 int i;
48 50
49 for (i = 0; i < NUM_COUNTERS; i++) { 51 for (i = 0; i < num_counters; i++) {
50 if (reserve_perfctr_nmi(MSR_P6_PERFCTR0 + i)) 52 if (reserve_perfctr_nmi(MSR_P6_PERFCTR0 + i))
51 msrs->counters[i].addr = MSR_P6_PERFCTR0 + i; 53 msrs->counters[i].addr = MSR_P6_PERFCTR0 + i;
52 else 54 else
53 msrs->counters[i].addr = 0; 55 msrs->counters[i].addr = 0;
54 } 56 }
55 57
56 for (i = 0; i < NUM_CONTROLS; i++) { 58 for (i = 0; i < num_counters; i++) {
57 if (reserve_evntsel_nmi(MSR_P6_EVNTSEL0 + i)) 59 if (reserve_evntsel_nmi(MSR_P6_EVNTSEL0 + i))
58 msrs->controls[i].addr = MSR_P6_EVNTSEL0 + i; 60 msrs->controls[i].addr = MSR_P6_EVNTSEL0 + i;
59 else 61 else
@@ -67,8 +69,22 @@ static void ppro_setup_ctrs(struct op_msrs const * const msrs)
67 unsigned int low, high; 69 unsigned int low, high;
68 int i; 70 int i;
69 71
72 if (!reset_value) {
73 reset_value = kmalloc(sizeof(unsigned) * num_counters,
74 GFP_ATOMIC);
75 if (!reset_value)
76 return;
77 }
78
79 if (cpu_has_arch_perfmon) {
80 union cpuid10_eax eax;
81 eax.full = cpuid_eax(0xa);
82 if (counter_width < eax.split.bit_width)
83 counter_width = eax.split.bit_width;
84 }
85
70 /* clear all counters */ 86 /* clear all counters */
71 for (i = 0 ; i < NUM_CONTROLS; ++i) { 87 for (i = 0 ; i < num_counters; ++i) {
72 if (unlikely(!CTRL_IS_RESERVED(msrs, i))) 88 if (unlikely(!CTRL_IS_RESERVED(msrs, i)))
73 continue; 89 continue;
74 CTRL_READ(low, high, msrs, i); 90 CTRL_READ(low, high, msrs, i);
@@ -77,18 +93,18 @@ static void ppro_setup_ctrs(struct op_msrs const * const msrs)
77 } 93 }
78 94
79 /* avoid a false detection of ctr overflows in NMI handler */ 95 /* avoid a false detection of ctr overflows in NMI handler */
80 for (i = 0; i < NUM_COUNTERS; ++i) { 96 for (i = 0; i < num_counters; ++i) {
81 if (unlikely(!CTR_IS_RESERVED(msrs, i))) 97 if (unlikely(!CTR_IS_RESERVED(msrs, i)))
82 continue; 98 continue;
83 CTR_32BIT_WRITE(1, msrs, i); 99 wrmsrl(msrs->counters[i].addr, -1LL);
84 } 100 }
85 101
86 /* enable active counters */ 102 /* enable active counters */
87 for (i = 0; i < NUM_COUNTERS; ++i) { 103 for (i = 0; i < num_counters; ++i) {
88 if ((counter_config[i].enabled) && (CTR_IS_RESERVED(msrs, i))) { 104 if ((counter_config[i].enabled) && (CTR_IS_RESERVED(msrs, i))) {
89 reset_value[i] = counter_config[i].count; 105 reset_value[i] = counter_config[i].count;
90 106
91 CTR_32BIT_WRITE(counter_config[i].count, msrs, i); 107 wrmsrl(msrs->counters[i].addr, -reset_value[i]);
92 108
93 CTRL_READ(low, high, msrs, i); 109 CTRL_READ(low, high, msrs, i);
94 CTRL_CLEAR(low); 110 CTRL_CLEAR(low);
@@ -111,13 +127,13 @@ static int ppro_check_ctrs(struct pt_regs * const regs,
111 unsigned int low, high; 127 unsigned int low, high;
112 int i; 128 int i;
113 129
114 for (i = 0 ; i < NUM_COUNTERS; ++i) { 130 for (i = 0 ; i < num_counters; ++i) {
115 if (!reset_value[i]) 131 if (!reset_value[i])
116 continue; 132 continue;
117 CTR_READ(low, high, msrs, i); 133 CTR_READ(low, high, msrs, i);
118 if (CTR_OVERFLOWED(low)) { 134 if (CTR_OVERFLOWED(low)) {
119 oprofile_add_sample(regs, i); 135 oprofile_add_sample(regs, i);
120 CTR_32BIT_WRITE(reset_value[i], msrs, i); 136 wrmsrl(msrs->counters[i].addr, -reset_value[i]);
121 } 137 }
122 } 138 }
123 139
@@ -141,7 +157,7 @@ static void ppro_start(struct op_msrs const * const msrs)
141 unsigned int low, high; 157 unsigned int low, high;
142 int i; 158 int i;
143 159
144 for (i = 0; i < NUM_COUNTERS; ++i) { 160 for (i = 0; i < num_counters; ++i) {
145 if (reset_value[i]) { 161 if (reset_value[i]) {
146 CTRL_READ(low, high, msrs, i); 162 CTRL_READ(low, high, msrs, i);
147 CTRL_SET_ACTIVE(low); 163 CTRL_SET_ACTIVE(low);
@@ -156,7 +172,7 @@ static void ppro_stop(struct op_msrs const * const msrs)
156 unsigned int low, high; 172 unsigned int low, high;
157 int i; 173 int i;
158 174
159 for (i = 0; i < NUM_COUNTERS; ++i) { 175 for (i = 0; i < num_counters; ++i) {
160 if (!reset_value[i]) 176 if (!reset_value[i])
161 continue; 177 continue;
162 CTRL_READ(low, high, msrs, i); 178 CTRL_READ(low, high, msrs, i);
@@ -169,24 +185,70 @@ static void ppro_shutdown(struct op_msrs const * const msrs)
169{ 185{
170 int i; 186 int i;
171 187
172 for (i = 0 ; i < NUM_COUNTERS ; ++i) { 188 for (i = 0 ; i < num_counters ; ++i) {
173 if (CTR_IS_RESERVED(msrs, i)) 189 if (CTR_IS_RESERVED(msrs, i))
174 release_perfctr_nmi(MSR_P6_PERFCTR0 + i); 190 release_perfctr_nmi(MSR_P6_PERFCTR0 + i);
175 } 191 }
176 for (i = 0 ; i < NUM_CONTROLS ; ++i) { 192 for (i = 0 ; i < num_counters ; ++i) {
177 if (CTRL_IS_RESERVED(msrs, i)) 193 if (CTRL_IS_RESERVED(msrs, i))
178 release_evntsel_nmi(MSR_P6_EVNTSEL0 + i); 194 release_evntsel_nmi(MSR_P6_EVNTSEL0 + i);
179 } 195 }
196 if (reset_value) {
197 kfree(reset_value);
198 reset_value = NULL;
199 }
180} 200}
181 201
182 202
183struct op_x86_model_spec const op_ppro_spec = { 203struct op_x86_model_spec op_ppro_spec = {
184 .num_counters = NUM_COUNTERS, 204 .num_counters = 2, /* can be overriden */
185 .num_controls = NUM_CONTROLS, 205 .num_controls = 2, /* dito */
186 .fill_in_addresses = &ppro_fill_in_addresses, 206 .fill_in_addresses = &ppro_fill_in_addresses,
187 .setup_ctrs = &ppro_setup_ctrs, 207 .setup_ctrs = &ppro_setup_ctrs,
188 .check_ctrs = &ppro_check_ctrs, 208 .check_ctrs = &ppro_check_ctrs,
189 .start = &ppro_start, 209 .start = &ppro_start,
190 .stop = &ppro_stop, 210 .stop = &ppro_stop,
191 .shutdown = &ppro_shutdown 211 .shutdown = &ppro_shutdown
212};
213
214/*
215 * Architectural performance monitoring.
216 *
217 * Newer Intel CPUs (Core1+) have support for architectural
218 * events described in CPUID 0xA. See the IA32 SDM Vol3b.18 for details.
219 * The advantage of this is that it can be done without knowing about
220 * the specific CPU.
221 */
222
223void arch_perfmon_setup_counters(void)
224{
225 union cpuid10_eax eax;
226
227 eax.full = cpuid_eax(0xa);
228
229 /* Workaround for BIOS bugs in 6/15. Taken from perfmon2 */
230 if (eax.split.version_id == 0 && current_cpu_data.x86 == 6 &&
231 current_cpu_data.x86_model == 15) {
232 eax.split.version_id = 2;
233 eax.split.num_counters = 2;
234 eax.split.bit_width = 40;
235 }
236
237 num_counters = eax.split.num_counters;
238
239 op_arch_perfmon_spec.num_counters = num_counters;
240 op_arch_perfmon_spec.num_controls = num_counters;
241 op_ppro_spec.num_counters = num_counters;
242 op_ppro_spec.num_controls = num_counters;
243}
244
245struct op_x86_model_spec op_arch_perfmon_spec = {
246 /* num_counters/num_controls filled in at runtime */
247 .fill_in_addresses = &ppro_fill_in_addresses,
248 /* user space does the cpuid check for available events */
249 .setup_ctrs = &ppro_setup_ctrs,
250 .check_ctrs = &ppro_check_ctrs,
251 .start = &ppro_start,
252 .stop = &ppro_stop,
253 .shutdown = &ppro_shutdown
192}; 254};
diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
index 05a0261ba0c3..825e79064d64 100644
--- a/arch/x86/oprofile/op_x86_model.h
+++ b/arch/x86/oprofile/op_x86_model.h
@@ -22,8 +22,8 @@ struct op_msr {
22}; 22};
23 23
24struct op_msrs { 24struct op_msrs {
25 struct op_msr * counters; 25 struct op_msr *counters;
26 struct op_msr * controls; 26 struct op_msr *controls;
27}; 27};
28 28
29struct pt_regs; 29struct pt_regs;
@@ -34,8 +34,8 @@ struct pt_regs;
34struct op_x86_model_spec { 34struct op_x86_model_spec {
35 int (*init)(struct oprofile_operations *ops); 35 int (*init)(struct oprofile_operations *ops);
36 void (*exit)(void); 36 void (*exit)(void);
37 unsigned int const num_counters; 37 unsigned int num_counters;
38 unsigned int const num_controls; 38 unsigned int num_controls;
39 void (*fill_in_addresses)(struct op_msrs * const msrs); 39 void (*fill_in_addresses)(struct op_msrs * const msrs);
40 void (*setup_ctrs)(struct op_msrs const * const msrs); 40 void (*setup_ctrs)(struct op_msrs const * const msrs);
41 int (*check_ctrs)(struct pt_regs * const regs, 41 int (*check_ctrs)(struct pt_regs * const regs,
@@ -45,9 +45,12 @@ struct op_x86_model_spec {
45 void (*shutdown)(struct op_msrs const * const msrs); 45 void (*shutdown)(struct op_msrs const * const msrs);
46}; 46};
47 47
48extern struct op_x86_model_spec const op_ppro_spec; 48extern struct op_x86_model_spec op_ppro_spec;
49extern struct op_x86_model_spec const op_p4_spec; 49extern struct op_x86_model_spec const op_p4_spec;
50extern struct op_x86_model_spec const op_p4_ht2_spec; 50extern struct op_x86_model_spec const op_p4_ht2_spec;
51extern struct op_x86_model_spec const op_amd_spec; 51extern struct op_x86_model_spec const op_amd_spec;
52extern struct op_x86_model_spec op_arch_perfmon_spec;
53
54extern void arch_perfmon_setup_counters(void);
52 55
53#endif /* OP_X86_MODEL_H */ 56#endif /* OP_X86_MODEL_H */
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index a213260b51e5..6c873dceb177 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -64,7 +64,12 @@ choice
64 default XTENSA_VARIANT_FSF 64 default XTENSA_VARIANT_FSF
65 65
66config XTENSA_VARIANT_FSF 66config XTENSA_VARIANT_FSF
67 bool "fsf" 67 bool "fsf - default (not generic) configuration"
68
69config XTENSA_VARIANT_DC232B
70 bool "dc232b - Diamond 232L Standard Core Rev.B (LE)"
71 help
72 This variant refers to Tensilica's Diamond 232L Standard core Rev.B (LE).
68endchoice 73endchoice
69 74
70config MMU 75config MMU
diff --git a/arch/xtensa/Makefile b/arch/xtensa/Makefile
index 4bd1e14c6b90..015b6b2a26b9 100644
--- a/arch/xtensa/Makefile
+++ b/arch/xtensa/Makefile
@@ -14,6 +14,7 @@
14# (Use VAR=<xtensa_config> to use another default compiler.) 14# (Use VAR=<xtensa_config> to use another default compiler.)
15 15
16variant-$(CONFIG_XTENSA_VARIANT_FSF) := fsf 16variant-$(CONFIG_XTENSA_VARIANT_FSF) := fsf
17variant-$(CONFIG_XTENSA_VARIANT_DC232B) := dc232b
17variant-$(CONFIG_XTENSA_VARIANT_LINUX_CUSTOM) := custom 18variant-$(CONFIG_XTENSA_VARIANT_LINUX_CUSTOM) := custom
18 19
19VARIANT = $(variant-y) 20VARIANT = $(variant-y)
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c
index c9ea73b7031b..5fbcde59a92d 100644
--- a/arch/xtensa/kernel/irq.c
+++ b/arch/xtensa/kernel/irq.c
@@ -48,7 +48,7 @@ asmlinkage void do_IRQ(int irq, struct pt_regs *regs)
48 48
49 if (irq >= NR_IRQS) { 49 if (irq >= NR_IRQS) {
50 printk(KERN_EMERG "%s: cannot handle IRQ %d\n", 50 printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
51 __FUNCTION__, irq); 51 __func__, irq);
52 } 52 }
53 53
54 irq_enter(); 54 irq_enter();
diff --git a/arch/xtensa/platforms/iss/network.c b/arch/xtensa/platforms/iss/network.c
index a2e252217428..11a20adc1409 100644
--- a/arch/xtensa/platforms/iss/network.c
+++ b/arch/xtensa/platforms/iss/network.c
@@ -640,7 +640,7 @@ static int iss_net_configure(int index, char *init)
640 *lp = ((struct iss_net_private) { 640 *lp = ((struct iss_net_private) {
641 .device_list = LIST_HEAD_INIT(lp->device_list), 641 .device_list = LIST_HEAD_INIT(lp->device_list),
642 .opened_list = LIST_HEAD_INIT(lp->opened_list), 642 .opened_list = LIST_HEAD_INIT(lp->opened_list),
643 .lock = SPIN_LOCK_UNLOCKED, 643 .lock = __SPIN_LOCK_UNLOCKED(lp.lock),
644 .dev = dev, 644 .dev = dev,
645 .index = index, 645 .index = index,
646 //.fd = -1, 646 //.fd = -1,
diff --git a/drivers/Kconfig b/drivers/Kconfig
index d19b6f5a1106..d38f43f593d4 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -78,6 +78,8 @@ source "drivers/hid/Kconfig"
78 78
79source "drivers/usb/Kconfig" 79source "drivers/usb/Kconfig"
80 80
81source "drivers/uwb/Kconfig"
82
81source "drivers/mmc/Kconfig" 83source "drivers/mmc/Kconfig"
82 84
83source "drivers/memstick/Kconfig" 85source "drivers/memstick/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 46c8681a07f4..cadc64fe8f68 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -100,3 +100,4 @@ obj-$(CONFIG_SSB) += ssb/
100obj-$(CONFIG_VIRTIO) += virtio/ 100obj-$(CONFIG_VIRTIO) += virtio/
101obj-$(CONFIG_REGULATOR) += regulator/ 101obj-$(CONFIG_REGULATOR) += regulator/
102obj-$(CONFIG_STAGING) += staging/ 102obj-$(CONFIG_STAGING) += staging/
103obj-$(CONFIG_UWB) += uwb/
diff --git a/drivers/acpi/pci_slot.c b/drivers/acpi/pci_slot.c
index d5b4ef898879..8d4a568be1cc 100644
--- a/drivers/acpi/pci_slot.c
+++ b/drivers/acpi/pci_slot.c
@@ -150,7 +150,7 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
150 } 150 }
151 151
152 snprintf(name, sizeof(name), "%u", (u32)sun); 152 snprintf(name, sizeof(name), "%u", (u32)sun);
153 pci_slot = pci_create_slot(pci_bus, device, name); 153 pci_slot = pci_create_slot(pci_bus, device, name, NULL);
154 if (IS_ERR(pci_slot)) { 154 if (IS_ERR(pci_slot)) {
155 err("pci_create_slot returned %ld\n", PTR_ERR(pci_slot)); 155 err("pci_create_slot returned %ld\n", PTR_ERR(pci_slot));
156 kfree(slot); 156 kfree(slot);
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 1ee9499bd343..bbb3cae57492 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -5373,6 +5373,8 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
5373 5373
5374#ifdef CONFIG_ATA_SFF 5374#ifdef CONFIG_ATA_SFF
5375 INIT_DELAYED_WORK(&ap->port_task, ata_pio_task); 5375 INIT_DELAYED_WORK(&ap->port_task, ata_pio_task);
5376#else
5377 INIT_DELAYED_WORK(&ap->port_task, NULL);
5376#endif 5378#endif
5377 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug); 5379 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
5378 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan); 5380 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index a93247cc395a..5d687d7cffae 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -1206,7 +1206,10 @@ void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
1206 1206
1207 ata_eh_clear_action(link, dev, ehi, action); 1207 ata_eh_clear_action(link, dev, ehi, action);
1208 1208
1209 if (!(ehc->i.flags & ATA_EHI_QUIET)) 1209 /* About to take EH action, set RECOVERED. Ignore actions on
1210 * slave links as master will do them again.
1211 */
1212 if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link)
1210 ap->pflags |= ATA_PFLAG_RECOVERED; 1213 ap->pflags |= ATA_PFLAG_RECOVERED;
1211 1214
1212 spin_unlock_irqrestore(ap->lock, flags); 1215 spin_unlock_irqrestore(ap->lock, flags);
@@ -2010,8 +2013,13 @@ void ata_eh_autopsy(struct ata_port *ap)
2010 struct ata_eh_context *mehc = &ap->link.eh_context; 2013 struct ata_eh_context *mehc = &ap->link.eh_context;
2011 struct ata_eh_context *sehc = &ap->slave_link->eh_context; 2014 struct ata_eh_context *sehc = &ap->slave_link->eh_context;
2012 2015
2016 /* transfer control flags from master to slave */
2017 sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK;
2018
2019 /* perform autopsy on the slave link */
2013 ata_eh_link_autopsy(ap->slave_link); 2020 ata_eh_link_autopsy(ap->slave_link);
2014 2021
2022 /* transfer actions from slave to master and clear slave */
2015 ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS); 2023 ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
2016 mehc->i.action |= sehc->i.action; 2024 mehc->i.action |= sehc->i.action;
2017 mehc->i.dev_action[1] |= sehc->i.dev_action[1]; 2025 mehc->i.dev_action[1] |= sehc->i.dev_action[1];
@@ -2447,14 +2455,14 @@ int ata_eh_reset(struct ata_link *link, int classify,
2447 dev->pio_mode = XFER_PIO_0; 2455 dev->pio_mode = XFER_PIO_0;
2448 dev->flags &= ~ATA_DFLAG_SLEEPING; 2456 dev->flags &= ~ATA_DFLAG_SLEEPING;
2449 2457
2450 if (ata_phys_link_offline(ata_dev_phys_link(dev))) 2458 if (!ata_phys_link_offline(ata_dev_phys_link(dev))) {
2451 continue; 2459 /* apply class override */
2452 2460 if (lflags & ATA_LFLAG_ASSUME_ATA)
2453 /* apply class override */ 2461 classes[dev->devno] = ATA_DEV_ATA;
2454 if (lflags & ATA_LFLAG_ASSUME_ATA) 2462 else if (lflags & ATA_LFLAG_ASSUME_SEMB)
2455 classes[dev->devno] = ATA_DEV_ATA; 2463 classes[dev->devno] = ATA_DEV_SEMB_UNSUP;
2456 else if (lflags & ATA_LFLAG_ASSUME_SEMB) 2464 } else
2457 classes[dev->devno] = ATA_DEV_SEMB_UNSUP; /* not yet */ 2465 classes[dev->devno] = ATA_DEV_NONE;
2458 } 2466 }
2459 2467
2460 /* record current link speed */ 2468 /* record current link speed */
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 2a4c516894f0..4b4739486327 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -2153,8 +2153,17 @@ void ata_sff_error_handler(struct ata_port *ap)
2153 */ 2153 */
2154void ata_sff_post_internal_cmd(struct ata_queued_cmd *qc) 2154void ata_sff_post_internal_cmd(struct ata_queued_cmd *qc)
2155{ 2155{
2156 if (qc->ap->ioaddr.bmdma_addr) 2156 struct ata_port *ap = qc->ap;
2157 unsigned long flags;
2158
2159 spin_lock_irqsave(ap->lock, flags);
2160
2161 ap->hsm_task_state = HSM_ST_IDLE;
2162
2163 if (ap->ioaddr.bmdma_addr)
2157 ata_bmdma_stop(qc); 2164 ata_bmdma_stop(qc);
2165
2166 spin_unlock_irqrestore(ap->lock, flags);
2158} 2167}
2159 2168
2160/** 2169/**
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index 1cfa74535d91..5b72e734300a 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -70,6 +70,7 @@ enum {
70static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); 70static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
71static int svia_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val); 71static int svia_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
72static int svia_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val); 72static int svia_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
73static void svia_tf_load(struct ata_port *ap, const struct ata_taskfile *tf);
73static void svia_noop_freeze(struct ata_port *ap); 74static void svia_noop_freeze(struct ata_port *ap);
74static int vt6420_prereset(struct ata_link *link, unsigned long deadline); 75static int vt6420_prereset(struct ata_link *link, unsigned long deadline);
75static int vt6421_pata_cable_detect(struct ata_port *ap); 76static int vt6421_pata_cable_detect(struct ata_port *ap);
@@ -103,21 +104,26 @@ static struct scsi_host_template svia_sht = {
103 ATA_BMDMA_SHT(DRV_NAME), 104 ATA_BMDMA_SHT(DRV_NAME),
104}; 105};
105 106
106static struct ata_port_operations vt6420_sata_ops = { 107static struct ata_port_operations svia_base_ops = {
107 .inherits = &ata_bmdma_port_ops, 108 .inherits = &ata_bmdma_port_ops,
109 .sff_tf_load = svia_tf_load,
110};
111
112static struct ata_port_operations vt6420_sata_ops = {
113 .inherits = &svia_base_ops,
108 .freeze = svia_noop_freeze, 114 .freeze = svia_noop_freeze,
109 .prereset = vt6420_prereset, 115 .prereset = vt6420_prereset,
110}; 116};
111 117
112static struct ata_port_operations vt6421_pata_ops = { 118static struct ata_port_operations vt6421_pata_ops = {
113 .inherits = &ata_bmdma_port_ops, 119 .inherits = &svia_base_ops,
114 .cable_detect = vt6421_pata_cable_detect, 120 .cable_detect = vt6421_pata_cable_detect,
115 .set_piomode = vt6421_set_pio_mode, 121 .set_piomode = vt6421_set_pio_mode,
116 .set_dmamode = vt6421_set_dma_mode, 122 .set_dmamode = vt6421_set_dma_mode,
117}; 123};
118 124
119static struct ata_port_operations vt6421_sata_ops = { 125static struct ata_port_operations vt6421_sata_ops = {
120 .inherits = &ata_bmdma_port_ops, 126 .inherits = &svia_base_ops,
121 .scr_read = svia_scr_read, 127 .scr_read = svia_scr_read,
122 .scr_write = svia_scr_write, 128 .scr_write = svia_scr_write,
123}; 129};
@@ -168,6 +174,29 @@ static int svia_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
168 return 0; 174 return 0;
169} 175}
170 176
177/**
178 * svia_tf_load - send taskfile registers to host controller
179 * @ap: Port to which output is sent
180 * @tf: ATA taskfile register set
181 *
182 * Outputs ATA taskfile to standard ATA host controller.
183 *
184 * This is to fix the internal bug of via chipsets, which will
185 * reset the device register after changing the IEN bit on ctl
186 * register.
187 */
188static void svia_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
189{
190 struct ata_taskfile ttf;
191
192 if (tf->ctl != ap->last_ctl) {
193 ttf = *tf;
194 ttf.flags |= ATA_TFLAG_DEVICE;
195 tf = &ttf;
196 }
197 ata_sff_tf_load(ap, tf);
198}
199
171static void svia_noop_freeze(struct ata_port *ap) 200static void svia_noop_freeze(struct ata_port *ap)
172{ 201{
173 /* Some VIA controllers choke if ATA_NIEN is manipulated in 202 /* Some VIA controllers choke if ATA_NIEN is manipulated in
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
index bf70450a49cc..5b819b12675a 100644
--- a/drivers/char/hvc_console.c
+++ b/drivers/char/hvc_console.c
@@ -161,7 +161,7 @@ static void hvc_console_print(struct console *co, const char *b,
161 } 161 }
162 } else { 162 } else {
163 r = cons_ops[index]->put_chars(vtermnos[index], c, i); 163 r = cons_ops[index]->put_chars(vtermnos[index], c, i);
164 if (r < 0) { 164 if (r <= 0) {
165 /* throw away chars on error */ 165 /* throw away chars on error */
166 i = 0; 166 i = 0;
167 } else if (r > 0) { 167 } else if (r > 0) {
@@ -374,6 +374,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
374 if (hp->ops->notifier_del) 374 if (hp->ops->notifier_del)
375 hp->ops->notifier_del(hp, hp->data); 375 hp->ops->notifier_del(hp, hp->data);
376 376
377 /* cancel pending tty resize work */
378 cancel_work_sync(&hp->tty_resize);
379
377 /* 380 /*
378 * Chain calls chars_in_buffer() and returns immediately if 381 * Chain calls chars_in_buffer() and returns immediately if
379 * there is no buffered data otherwise sleeps on a wait queue 382 * there is no buffered data otherwise sleeps on a wait queue
@@ -399,6 +402,9 @@ static void hvc_hangup(struct tty_struct *tty)
399 if (!hp) 402 if (!hp)
400 return; 403 return;
401 404
405 /* cancel pending tty resize work */
406 cancel_work_sync(&hp->tty_resize);
407
402 spin_lock_irqsave(&hp->lock, flags); 408 spin_lock_irqsave(&hp->lock, flags);
403 409
404 /* 410 /*
@@ -418,8 +424,8 @@ static void hvc_hangup(struct tty_struct *tty)
418 424
419 spin_unlock_irqrestore(&hp->lock, flags); 425 spin_unlock_irqrestore(&hp->lock, flags);
420 426
421 if (hp->ops->notifier_del) 427 if (hp->ops->notifier_hangup)
422 hp->ops->notifier_del(hp, hp->data); 428 hp->ops->notifier_hangup(hp, hp->data);
423 429
424 while(temp_open_count) { 430 while(temp_open_count) {
425 --temp_open_count; 431 --temp_open_count;
@@ -431,7 +437,7 @@ static void hvc_hangup(struct tty_struct *tty)
431 * Push buffered characters whether they were just recently buffered or waiting 437 * Push buffered characters whether they were just recently buffered or waiting
432 * on a blocked hypervisor. Call this function with hp->lock held. 438 * on a blocked hypervisor. Call this function with hp->lock held.
433 */ 439 */
434static void hvc_push(struct hvc_struct *hp) 440static int hvc_push(struct hvc_struct *hp)
435{ 441{
436 int n; 442 int n;
437 443
@@ -439,7 +445,7 @@ static void hvc_push(struct hvc_struct *hp)
439 if (n <= 0) { 445 if (n <= 0) {
440 if (n == 0) { 446 if (n == 0) {
441 hp->do_wakeup = 1; 447 hp->do_wakeup = 1;
442 return; 448 return 0;
443 } 449 }
444 /* throw away output on error; this happens when 450 /* throw away output on error; this happens when
445 there is no session connected to the vterm. */ 451 there is no session connected to the vterm. */
@@ -450,6 +456,8 @@ static void hvc_push(struct hvc_struct *hp)
450 memmove(hp->outbuf, hp->outbuf + n, hp->n_outbuf); 456 memmove(hp->outbuf, hp->outbuf + n, hp->n_outbuf);
451 else 457 else
452 hp->do_wakeup = 1; 458 hp->do_wakeup = 1;
459
460 return n;
453} 461}
454 462
455static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count) 463static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count)
@@ -492,6 +500,39 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
492 return written; 500 return written;
493} 501}
494 502
503/**
504 * hvc_set_winsz() - Resize the hvc tty terminal window.
505 * @work: work structure.
506 *
507 * The routine shall not be called within an atomic context because it
508 * might sleep.
509 *
510 * Locking: hp->lock
511 */
512static void hvc_set_winsz(struct work_struct *work)
513{
514 struct hvc_struct *hp;
515 unsigned long hvc_flags;
516 struct tty_struct *tty;
517 struct winsize ws;
518
519 hp = container_of(work, struct hvc_struct, tty_resize);
520 if (!hp)
521 return;
522
523 spin_lock_irqsave(&hp->lock, hvc_flags);
524 if (!hp->tty) {
525 spin_unlock_irqrestore(&hp->lock, hvc_flags);
526 return;
527 }
528 ws = hp->ws;
529 tty = tty_kref_get(hp->tty);
530 spin_unlock_irqrestore(&hp->lock, hvc_flags);
531
532 tty_do_resize(tty, tty, &ws);
533 tty_kref_put(tty);
534}
535
495/* 536/*
496 * This is actually a contract between the driver and the tty layer outlining 537 * This is actually a contract between the driver and the tty layer outlining
497 * how much write room the driver can guarantee will be sent OR BUFFERED. This 538 * how much write room the driver can guarantee will be sent OR BUFFERED. This
@@ -538,16 +579,20 @@ int hvc_poll(struct hvc_struct *hp)
538 char buf[N_INBUF] __ALIGNED__; 579 char buf[N_INBUF] __ALIGNED__;
539 unsigned long flags; 580 unsigned long flags;
540 int read_total = 0; 581 int read_total = 0;
582 int written_total = 0;
541 583
542 spin_lock_irqsave(&hp->lock, flags); 584 spin_lock_irqsave(&hp->lock, flags);
543 585
544 /* Push pending writes */ 586 /* Push pending writes */
545 if (hp->n_outbuf > 0) 587 if (hp->n_outbuf > 0)
546 hvc_push(hp); 588 written_total = hvc_push(hp);
547 589
548 /* Reschedule us if still some write pending */ 590 /* Reschedule us if still some write pending */
549 if (hp->n_outbuf > 0) 591 if (hp->n_outbuf > 0) {
550 poll_mask |= HVC_POLL_WRITE; 592 poll_mask |= HVC_POLL_WRITE;
593 /* If hvc_push() was not able to write, sleep a few msecs */
594 timeout = (written_total) ? 0 : MIN_TIMEOUT;
595 }
551 596
552 /* No tty attached, just skip */ 597 /* No tty attached, just skip */
553 tty = hp->tty; 598 tty = hp->tty;
@@ -632,6 +677,24 @@ int hvc_poll(struct hvc_struct *hp)
632} 677}
633EXPORT_SYMBOL_GPL(hvc_poll); 678EXPORT_SYMBOL_GPL(hvc_poll);
634 679
680/**
681 * hvc_resize() - Update terminal window size information.
682 * @hp: HVC console pointer
683 * @ws: Terminal window size structure
684 *
685 * Stores the specified window size information in the hvc structure of @hp.
686 * The function schedule the tty resize update.
687 *
688 * Locking: Locking free; the function MUST be called holding hp->lock
689 */
690void hvc_resize(struct hvc_struct *hp, struct winsize ws)
691{
692 if ((hp->ws.ws_row != ws.ws_row) || (hp->ws.ws_col != ws.ws_col)) {
693 hp->ws = ws;
694 schedule_work(&hp->tty_resize);
695 }
696}
697
635/* 698/*
636 * This kthread is either polling or interrupt driven. This is determined by 699 * This kthread is either polling or interrupt driven. This is determined by
637 * calling hvc_poll() who determines whether a console adapter support 700 * calling hvc_poll() who determines whether a console adapter support
@@ -659,10 +722,6 @@ static int khvcd(void *unused)
659 poll_mask |= HVC_POLL_READ; 722 poll_mask |= HVC_POLL_READ;
660 if (hvc_kicked) 723 if (hvc_kicked)
661 continue; 724 continue;
662 if (poll_mask & HVC_POLL_WRITE) {
663 yield();
664 continue;
665 }
666 set_current_state(TASK_INTERRUPTIBLE); 725 set_current_state(TASK_INTERRUPTIBLE);
667 if (!hvc_kicked) { 726 if (!hvc_kicked) {
668 if (poll_mask == 0) 727 if (poll_mask == 0)
@@ -718,6 +777,7 @@ struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int data,
718 777
719 kref_init(&hp->kref); 778 kref_init(&hp->kref);
720 779
780 INIT_WORK(&hp->tty_resize, hvc_set_winsz);
721 spin_lock_init(&hp->lock); 781 spin_lock_init(&hp->lock);
722 spin_lock(&hvc_structs_lock); 782 spin_lock(&hvc_structs_lock);
723 783
@@ -743,7 +803,7 @@ struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int data,
743} 803}
744EXPORT_SYMBOL_GPL(hvc_alloc); 804EXPORT_SYMBOL_GPL(hvc_alloc);
745 805
746int __devexit hvc_remove(struct hvc_struct *hp) 806int hvc_remove(struct hvc_struct *hp)
747{ 807{
748 unsigned long flags; 808 unsigned long flags;
749 struct tty_struct *tty; 809 struct tty_struct *tty;
@@ -796,7 +856,7 @@ static int hvc_init(void)
796 drv->minor_start = HVC_MINOR; 856 drv->minor_start = HVC_MINOR;
797 drv->type = TTY_DRIVER_TYPE_SYSTEM; 857 drv->type = TTY_DRIVER_TYPE_SYSTEM;
798 drv->init_termios = tty_std_termios; 858 drv->init_termios = tty_std_termios;
799 drv->flags = TTY_DRIVER_REAL_RAW; 859 drv->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_RESET_TERMIOS;
800 tty_set_operations(drv, &hvc_ops); 860 tty_set_operations(drv, &hvc_ops);
801 861
802 /* Always start the kthread because there can be hotplug vty adapters 862 /* Always start the kthread because there can be hotplug vty adapters
diff --git a/drivers/char/hvc_console.h b/drivers/char/hvc_console.h
index 9790201718ae..8297dbc2e6ec 100644
--- a/drivers/char/hvc_console.h
+++ b/drivers/char/hvc_console.h
@@ -27,6 +27,7 @@
27#ifndef HVC_CONSOLE_H 27#ifndef HVC_CONSOLE_H
28#define HVC_CONSOLE_H 28#define HVC_CONSOLE_H
29#include <linux/kref.h> 29#include <linux/kref.h>
30#include <linux/tty.h>
30 31
31/* 32/*
32 * This is the max number of console adapters that can/will be found as 33 * This is the max number of console adapters that can/will be found as
@@ -56,6 +57,8 @@ struct hvc_struct {
56 struct hv_ops *ops; 57 struct hv_ops *ops;
57 int irq_requested; 58 int irq_requested;
58 int data; 59 int data;
60 struct winsize ws;
61 struct work_struct tty_resize;
59 struct list_head next; 62 struct list_head next;
60 struct kref kref; /* ref count & hvc_struct lifetime */ 63 struct kref kref; /* ref count & hvc_struct lifetime */
61}; 64};
@@ -65,9 +68,10 @@ struct hv_ops {
65 int (*get_chars)(uint32_t vtermno, char *buf, int count); 68 int (*get_chars)(uint32_t vtermno, char *buf, int count);
66 int (*put_chars)(uint32_t vtermno, const char *buf, int count); 69 int (*put_chars)(uint32_t vtermno, const char *buf, int count);
67 70
68 /* Callbacks for notification. Called in open and close */ 71 /* Callbacks for notification. Called in open, close and hangup */
69 int (*notifier_add)(struct hvc_struct *hp, int irq); 72 int (*notifier_add)(struct hvc_struct *hp, int irq);
70 void (*notifier_del)(struct hvc_struct *hp, int irq); 73 void (*notifier_del)(struct hvc_struct *hp, int irq);
74 void (*notifier_hangup)(struct hvc_struct *hp, int irq);
71}; 75};
72 76
73/* Register a vterm and a slot index for use as a console (console_init) */ 77/* Register a vterm and a slot index for use as a console (console_init) */
@@ -77,15 +81,19 @@ extern int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops);
77extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int data, 81extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int data,
78 struct hv_ops *ops, int outbuf_size); 82 struct hv_ops *ops, int outbuf_size);
79/* remove a vterm from hvc tty operation (module_exit or hotplug remove) */ 83/* remove a vterm from hvc tty operation (module_exit or hotplug remove) */
80extern int __devexit hvc_remove(struct hvc_struct *hp); 84extern int hvc_remove(struct hvc_struct *hp);
81 85
82/* data available */ 86/* data available */
83int hvc_poll(struct hvc_struct *hp); 87int hvc_poll(struct hvc_struct *hp);
84void hvc_kick(void); 88void hvc_kick(void);
85 89
90/* Resize hvc tty terminal window */
91extern void hvc_resize(struct hvc_struct *hp, struct winsize ws);
92
86/* default notifier for irq based notification */ 93/* default notifier for irq based notification */
87extern int notifier_add_irq(struct hvc_struct *hp, int data); 94extern int notifier_add_irq(struct hvc_struct *hp, int data);
88extern void notifier_del_irq(struct hvc_struct *hp, int data); 95extern void notifier_del_irq(struct hvc_struct *hp, int data);
96extern void notifier_hangup_irq(struct hvc_struct *hp, int data);
89 97
90 98
91#if defined(CONFIG_XMON) && defined(CONFIG_SMP) 99#if defined(CONFIG_XMON) && defined(CONFIG_SMP)
diff --git a/drivers/char/hvc_irq.c b/drivers/char/hvc_irq.c
index 73a59cdb8947..d09e5688d449 100644
--- a/drivers/char/hvc_irq.c
+++ b/drivers/char/hvc_irq.c
@@ -42,3 +42,8 @@ void notifier_del_irq(struct hvc_struct *hp, int irq)
42 free_irq(irq, hp); 42 free_irq(irq, hp);
43 hp->irq_requested = 0; 43 hp->irq_requested = 0;
44} 44}
45
46void notifier_hangup_irq(struct hvc_struct *hp, int irq)
47{
48 notifier_del_irq(hp, irq);
49}
diff --git a/drivers/char/hvc_iseries.c b/drivers/char/hvc_iseries.c
index b71c610fe5ae..b74a2f8ab908 100644
--- a/drivers/char/hvc_iseries.c
+++ b/drivers/char/hvc_iseries.c
@@ -202,6 +202,7 @@ static struct hv_ops hvc_get_put_ops = {
202 .put_chars = put_chars, 202 .put_chars = put_chars,
203 .notifier_add = notifier_add_irq, 203 .notifier_add = notifier_add_irq,
204 .notifier_del = notifier_del_irq, 204 .notifier_del = notifier_del_irq,
205 .notifier_hangup = notifier_hangup_irq,
205}; 206};
206 207
207static int __devinit hvc_vio_probe(struct vio_dev *vdev, 208static int __devinit hvc_vio_probe(struct vio_dev *vdev,
diff --git a/drivers/char/hvc_vio.c b/drivers/char/hvc_vio.c
index 93f3840c1682..019e0b58593d 100644
--- a/drivers/char/hvc_vio.c
+++ b/drivers/char/hvc_vio.c
@@ -82,6 +82,7 @@ static struct hv_ops hvc_get_put_ops = {
82 .put_chars = hvc_put_chars, 82 .put_chars = hvc_put_chars,
83 .notifier_add = notifier_add_irq, 83 .notifier_add = notifier_add_irq,
84 .notifier_del = notifier_del_irq, 84 .notifier_del = notifier_del_irq,
85 .notifier_hangup = notifier_hangup_irq,
85}; 86};
86 87
87static int __devinit hvc_vio_probe(struct vio_dev *vdev, 88static int __devinit hvc_vio_probe(struct vio_dev *vdev,
diff --git a/drivers/char/hvc_xen.c b/drivers/char/hvc_xen.c
index 538ceea5e7df..eba999f8598d 100644
--- a/drivers/char/hvc_xen.c
+++ b/drivers/char/hvc_xen.c
@@ -102,6 +102,7 @@ static struct hv_ops hvc_ops = {
102 .put_chars = write_console, 102 .put_chars = write_console,
103 .notifier_add = notifier_add_irq, 103 .notifier_add = notifier_add_irq,
104 .notifier_del = notifier_del_irq, 104 .notifier_del = notifier_del_irq,
105 .notifier_hangup = notifier_hangup_irq,
105}; 106};
106 107
107static int __init xen_init(void) 108static int __init xen_init(void)
diff --git a/drivers/char/tty_port.c b/drivers/char/tty_port.c
index 553b0e9d8d17..c8f8024cb40e 100644
--- a/drivers/char/tty_port.c
+++ b/drivers/char/tty_port.c
@@ -90,7 +90,7 @@ void tty_port_tty_set(struct tty_port *port, struct tty_struct *tty)
90 spin_lock_irqsave(&port->lock, flags); 90 spin_lock_irqsave(&port->lock, flags);
91 if (port->tty) 91 if (port->tty)
92 tty_kref_put(port->tty); 92 tty_kref_put(port->tty);
93 port->tty = tty; 93 port->tty = tty_kref_get(tty);
94 spin_unlock_irqrestore(&port->lock, flags); 94 spin_unlock_irqrestore(&port->lock, flags);
95} 95}
96EXPORT_SYMBOL(tty_port_tty_set); 96EXPORT_SYMBOL(tty_port_tty_set);
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index d0f4eb6fdb7f..3fb0d2c88ba5 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -198,6 +198,7 @@ static int __devinit virtcons_probe(struct virtio_device *dev)
198 virtio_cons.put_chars = put_chars; 198 virtio_cons.put_chars = put_chars;
199 virtio_cons.notifier_add = notifier_add_vio; 199 virtio_cons.notifier_add = notifier_add_vio;
200 virtio_cons.notifier_del = notifier_del_vio; 200 virtio_cons.notifier_del = notifier_del_vio;
201 virtio_cons.notifier_hangup = notifier_del_vio;
201 202
202 /* The first argument of hvc_alloc() is the virtual console number, so 203 /* The first argument of hvc_alloc() is the virtual console number, so
203 * we use zero. The second argument is the parameter for the 204 * we use zero. The second argument is the parameter for the
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index dbd42d6c93a7..7f2ee27fe76b 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -127,6 +127,13 @@ config GPIO_PCF857X
127 This driver provides an in-kernel interface to those GPIOs using 127 This driver provides an in-kernel interface to those GPIOs using
128 platform-neutral GPIO calls. 128 platform-neutral GPIO calls.
129 129
130config GPIO_TWL4030
131 tristate "TWL4030, TWL5030, and TPS659x0 GPIOs"
132 depends on TWL4030_CORE
133 help
134 Say yes here to access the GPIO signals of various multi-function
135 power management chips from Texas Instruments.
136
130comment "PCI GPIO expanders:" 137comment "PCI GPIO expanders:"
131 138
132config GPIO_BT8XX 139config GPIO_BT8XX
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 01b4bbde1956..6aafdeb9ad03 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -9,4 +9,5 @@ obj-$(CONFIG_GPIO_MAX732X) += max732x.o
9obj-$(CONFIG_GPIO_MCP23S08) += mcp23s08.o 9obj-$(CONFIG_GPIO_MCP23S08) += mcp23s08.o
10obj-$(CONFIG_GPIO_PCA953X) += pca953x.o 10obj-$(CONFIG_GPIO_PCA953X) += pca953x.o
11obj-$(CONFIG_GPIO_PCF857X) += pcf857x.o 11obj-$(CONFIG_GPIO_PCF857X) += pcf857x.o
12obj-$(CONFIG_GPIO_TWL4030) += twl4030-gpio.o
12obj-$(CONFIG_GPIO_BT8XX) += bt8xxgpio.o 13obj-$(CONFIG_GPIO_BT8XX) += bt8xxgpio.o
diff --git a/drivers/gpio/twl4030-gpio.c b/drivers/gpio/twl4030-gpio.c
new file mode 100644
index 000000000000..37d3eec8730a
--- /dev/null
+++ b/drivers/gpio/twl4030-gpio.c
@@ -0,0 +1,521 @@
1/*
2 * twl4030_gpio.c -- access to GPIOs on TWL4030/TPS659x0 chips
3 *
4 * Copyright (C) 2006-2007 Texas Instruments, Inc.
5 * Copyright (C) 2006 MontaVista Software, Inc.
6 *
7 * Code re-arranged and cleaned up by:
8 * Syed Mohammed Khasim <x0khasim@ti.com>
9 *
10 * Initial Code:
11 * Andy Lowe / Nishanth Menon
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 */
27
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/interrupt.h>
31#include <linux/kthread.h>
32#include <linux/irq.h>
33#include <linux/gpio.h>
34#include <linux/platform_device.h>
35#include <linux/slab.h>
36
37#include <linux/i2c/twl4030.h>
38
39
40/*
41 * The GPIO "subchip" supports 18 GPIOs which can be configured as
42 * inputs or outputs, with pullups or pulldowns on each pin. Each
43 * GPIO can trigger interrupts on either or both edges.
44 *
45 * GPIO interrupts can be fed to either of two IRQ lines; this is
46 * intended to support multiple hosts.
47 *
48 * There are also two LED pins used sometimes as output-only GPIOs.
49 */
50
51
52static struct gpio_chip twl_gpiochip;
53static int twl4030_gpio_irq_base;
54
55/* genirq interfaces are not available to modules */
56#ifdef MODULE
57#define is_module() true
58#else
59#define is_module() false
60#endif
61
62/* GPIO_CTRL Fields */
63#define MASK_GPIO_CTRL_GPIO0CD1 BIT(0)
64#define MASK_GPIO_CTRL_GPIO1CD2 BIT(1)
65#define MASK_GPIO_CTRL_GPIO_ON BIT(2)
66
67/* Mask for GPIO registers when aggregated into a 32-bit integer */
68#define GPIO_32_MASK 0x0003ffff
69
70/* Data structures */
71static DEFINE_MUTEX(gpio_lock);
72
73/* store usage of each GPIO. - each bit represents one GPIO */
74static unsigned int gpio_usage_count;
75
76/*----------------------------------------------------------------------*/
77
78/*
79 * To configure TWL4030 GPIO module registers
80 */
81static inline int gpio_twl4030_write(u8 address, u8 data)
82{
83 return twl4030_i2c_write_u8(TWL4030_MODULE_GPIO, data, address);
84}
85
86/*----------------------------------------------------------------------*/
87
88/*
89 * LED register offsets (use TWL4030_MODULE_{LED,PWMA,PWMB}))
90 * PWMs A and B are dedicated to LEDs A and B, respectively.
91 */
92
93#define TWL4030_LED_LEDEN 0x0
94
95/* LEDEN bits */
96#define LEDEN_LEDAON BIT(0)
97#define LEDEN_LEDBON BIT(1)
98#define LEDEN_LEDAEXT BIT(2)
99#define LEDEN_LEDBEXT BIT(3)
100#define LEDEN_LEDAPWM BIT(4)
101#define LEDEN_LEDBPWM BIT(5)
102#define LEDEN_PWM_LENGTHA BIT(6)
103#define LEDEN_PWM_LENGTHB BIT(7)
104
105#define TWL4030_PWMx_PWMxON 0x0
106#define TWL4030_PWMx_PWMxOFF 0x1
107
108#define PWMxON_LENGTH BIT(7)
109
110/*----------------------------------------------------------------------*/
111
112/*
113 * To read a TWL4030 GPIO module register
114 */
115static inline int gpio_twl4030_read(u8 address)
116{
117 u8 data;
118 int ret = 0;
119
120 ret = twl4030_i2c_read_u8(TWL4030_MODULE_GPIO, &data, address);
121 return (ret < 0) ? ret : data;
122}
123
124/*----------------------------------------------------------------------*/
125
126static u8 cached_leden; /* protected by gpio_lock */
127
128/* The LED lines are open drain outputs ... a FET pulls to GND, so an
129 * external pullup is needed. We could also expose the integrated PWM
130 * as a LED brightness control; we initialize it as "always on".
131 */
132static void twl4030_led_set_value(int led, int value)
133{
134 u8 mask = LEDEN_LEDAON | LEDEN_LEDAPWM;
135 int status;
136
137 if (led)
138 mask <<= 1;
139
140 mutex_lock(&gpio_lock);
141 if (value)
142 cached_leden &= ~mask;
143 else
144 cached_leden |= mask;
145 status = twl4030_i2c_write_u8(TWL4030_MODULE_LED, cached_leden,
146 TWL4030_LED_LEDEN);
147 mutex_unlock(&gpio_lock);
148}
149
150static int twl4030_set_gpio_direction(int gpio, int is_input)
151{
152 u8 d_bnk = gpio >> 3;
153 u8 d_msk = BIT(gpio & 0x7);
154 u8 reg = 0;
155 u8 base = REG_GPIODATADIR1 + d_bnk;
156 int ret = 0;
157
158 mutex_lock(&gpio_lock);
159 ret = gpio_twl4030_read(base);
160 if (ret >= 0) {
161 if (is_input)
162 reg = ret & ~d_msk;
163 else
164 reg = ret | d_msk;
165
166 ret = gpio_twl4030_write(base, reg);
167 }
168 mutex_unlock(&gpio_lock);
169 return ret;
170}
171
172static int twl4030_set_gpio_dataout(int gpio, int enable)
173{
174 u8 d_bnk = gpio >> 3;
175 u8 d_msk = BIT(gpio & 0x7);
176 u8 base = 0;
177
178 if (enable)
179 base = REG_SETGPIODATAOUT1 + d_bnk;
180 else
181 base = REG_CLEARGPIODATAOUT1 + d_bnk;
182
183 return gpio_twl4030_write(base, d_msk);
184}
185
186static int twl4030_get_gpio_datain(int gpio)
187{
188 u8 d_bnk = gpio >> 3;
189 u8 d_off = gpio & 0x7;
190 u8 base = 0;
191 int ret = 0;
192
193 if (unlikely((gpio >= TWL4030_GPIO_MAX)
194 || !(gpio_usage_count & BIT(gpio))))
195 return -EPERM;
196
197 base = REG_GPIODATAIN1 + d_bnk;
198 ret = gpio_twl4030_read(base);
199 if (ret > 0)
200 ret = (ret >> d_off) & 0x1;
201
202 return ret;
203}
204
205/*
206 * Configure debounce timing value for a GPIO pin on TWL4030
207 */
208int twl4030_set_gpio_debounce(int gpio, int enable)
209{
210 u8 d_bnk = gpio >> 3;
211 u8 d_msk = BIT(gpio & 0x7);
212 u8 reg = 0;
213 u8 base = 0;
214 int ret = 0;
215
216 if (unlikely((gpio >= TWL4030_GPIO_MAX)
217 || !(gpio_usage_count & BIT(gpio))))
218 return -EPERM;
219
220 base = REG_GPIO_DEBEN1 + d_bnk;
221 mutex_lock(&gpio_lock);
222 ret = gpio_twl4030_read(base);
223 if (ret >= 0) {
224 if (enable)
225 reg = ret | d_msk;
226 else
227 reg = ret & ~d_msk;
228
229 ret = gpio_twl4030_write(base, reg);
230 }
231 mutex_unlock(&gpio_lock);
232 return ret;
233}
234EXPORT_SYMBOL(twl4030_set_gpio_debounce);
235
236/*----------------------------------------------------------------------*/
237
238static int twl_request(struct gpio_chip *chip, unsigned offset)
239{
240 int status = 0;
241
242 mutex_lock(&gpio_lock);
243
244 /* Support the two LED outputs as output-only GPIOs. */
245 if (offset >= TWL4030_GPIO_MAX) {
246 u8 ledclr_mask = LEDEN_LEDAON | LEDEN_LEDAEXT
247 | LEDEN_LEDAPWM | LEDEN_PWM_LENGTHA;
248 u8 module = TWL4030_MODULE_PWMA;
249
250 offset -= TWL4030_GPIO_MAX;
251 if (offset) {
252 ledclr_mask <<= 1;
253 module = TWL4030_MODULE_PWMB;
254 }
255
256 /* initialize PWM to always-drive */
257 status = twl4030_i2c_write_u8(module, 0x7f,
258 TWL4030_PWMx_PWMxOFF);
259 if (status < 0)
260 goto done;
261 status = twl4030_i2c_write_u8(module, 0x7f,
262 TWL4030_PWMx_PWMxON);
263 if (status < 0)
264 goto done;
265
266 /* init LED to not-driven (high) */
267 module = TWL4030_MODULE_LED;
268 status = twl4030_i2c_read_u8(module, &cached_leden,
269 TWL4030_LED_LEDEN);
270 if (status < 0)
271 goto done;
272 cached_leden &= ~ledclr_mask;
273 status = twl4030_i2c_write_u8(module, cached_leden,
274 TWL4030_LED_LEDEN);
275 if (status < 0)
276 goto done;
277
278 status = 0;
279 goto done;
280 }
281
282 /* on first use, turn GPIO module "on" */
283 if (!gpio_usage_count) {
284 struct twl4030_gpio_platform_data *pdata;
285 u8 value = MASK_GPIO_CTRL_GPIO_ON;
286
287 /* optionally have the first two GPIOs switch vMMC1
288 * and vMMC2 power supplies based on card presence.
289 */
290 pdata = chip->dev->platform_data;
291 value |= pdata->mmc_cd & 0x03;
292
293 status = gpio_twl4030_write(REG_GPIO_CTRL, value);
294 }
295
296 if (!status)
297 gpio_usage_count |= (0x1 << offset);
298
299done:
300 mutex_unlock(&gpio_lock);
301 return status;
302}
303
304static void twl_free(struct gpio_chip *chip, unsigned offset)
305{
306 if (offset >= TWL4030_GPIO_MAX) {
307 twl4030_led_set_value(offset - TWL4030_GPIO_MAX, 1);
308 return;
309 }
310
311 mutex_lock(&gpio_lock);
312
313 gpio_usage_count &= ~BIT(offset);
314
315 /* on last use, switch off GPIO module */
316 if (!gpio_usage_count)
317 gpio_twl4030_write(REG_GPIO_CTRL, 0x0);
318
319 mutex_unlock(&gpio_lock);
320}
321
322static int twl_direction_in(struct gpio_chip *chip, unsigned offset)
323{
324 return (offset < TWL4030_GPIO_MAX)
325 ? twl4030_set_gpio_direction(offset, 1)
326 : -EINVAL;
327}
328
329static int twl_get(struct gpio_chip *chip, unsigned offset)
330{
331 int status = 0;
332
333 if (offset < TWL4030_GPIO_MAX)
334 status = twl4030_get_gpio_datain(offset);
335 else if (offset == TWL4030_GPIO_MAX)
336 status = cached_leden & LEDEN_LEDAON;
337 else
338 status = cached_leden & LEDEN_LEDBON;
339 return (status < 0) ? 0 : status;
340}
341
342static int twl_direction_out(struct gpio_chip *chip, unsigned offset, int value)
343{
344 if (offset < TWL4030_GPIO_MAX) {
345 twl4030_set_gpio_dataout(offset, value);
346 return twl4030_set_gpio_direction(offset, 0);
347 } else {
348 twl4030_led_set_value(offset - TWL4030_GPIO_MAX, value);
349 return 0;
350 }
351}
352
353static void twl_set(struct gpio_chip *chip, unsigned offset, int value)
354{
355 if (offset < TWL4030_GPIO_MAX)
356 twl4030_set_gpio_dataout(offset, value);
357 else
358 twl4030_led_set_value(offset - TWL4030_GPIO_MAX, value);
359}
360
361static int twl_to_irq(struct gpio_chip *chip, unsigned offset)
362{
363 return (twl4030_gpio_irq_base && (offset < TWL4030_GPIO_MAX))
364 ? (twl4030_gpio_irq_base + offset)
365 : -EINVAL;
366}
367
/* gpio_chip descriptor for the TWL4030 GPIO bank.  base/ngpio/dev are
 * filled in at probe time; ngpio grows by 2 when the board exposes the
 * LED outputs as extra GPIOs.  can_sleep is set because every accessor
 * above goes through twl4030 register access, which may sleep.
 */
static struct gpio_chip twl_gpiochip = {
	.label			= "twl4030",
	.owner			= THIS_MODULE,
	.request		= twl_request,
	.free			= twl_free,
	.direction_input	= twl_direction_in,
	.get			= twl_get,
	.direction_output	= twl_direction_out,
	.set			= twl_set,
	.to_irq			= twl_to_irq,
	.can_sleep		= 1,
};
380
381/*----------------------------------------------------------------------*/
382
/*
 * Program the five GPIOPUPDCTR pull-configuration registers from two
 * per-GPIO bitmasks (ups = pullup enable, downs = pulldown enable).
 * Each register packs four GPIOs at 2 bits apiece: bit (j+1) selects
 * the pullup and bit j the pulldown; a GPIO named in neither mask gets
 * both bits clear.  Pullup wins when a GPIO appears in both masks.
 *
 * Returns 0 on success or a negative error from the I2C write.
 */
static int __devinit gpio_twl4030_pulls(u32 ups, u32 downs)
{
	/* message[0] is skipped (write starts at index 1, length 5);
	 * presumably reserved by twl4030_i2c_write — TODO confirm. */
	u8 message[6];
	unsigned i, gpio_bit;

	/* For most pins, a pulldown was enabled by default.
	 * We should have data that's specific to this board.
	 */
	for (gpio_bit = 1, i = 1; i < 6; i++) {
		u8 bit_mask;
		unsigned j;

		/* Four GPIOs per register, two control bits per GPIO. */
		for (bit_mask = 0, j = 0; j < 8; j += 2, gpio_bit <<= 1) {
			if (ups & gpio_bit)
				bit_mask |= 1 << (j + 1);
			else if (downs & gpio_bit)
				bit_mask |= 1 << (j + 0);
		}
		message[i] = bit_mask;
	}

	return twl4030_i2c_write(TWL4030_MODULE_GPIO, message,
				REG_GPIOPUPDCTR1, 5);
}
407
408static int gpio_twl4030_remove(struct platform_device *pdev);
409
410static int __devinit gpio_twl4030_probe(struct platform_device *pdev)
411{
412 struct twl4030_gpio_platform_data *pdata = pdev->dev.platform_data;
413 int ret;
414
415 /* maybe setup IRQs */
416 if (pdata->irq_base) {
417 if (is_module()) {
418 dev_err(&pdev->dev,
419 "can't dispatch IRQs from modules\n");
420 goto no_irqs;
421 }
422 ret = twl4030_sih_setup(TWL4030_MODULE_GPIO);
423 if (ret < 0)
424 return ret;
425 WARN_ON(ret != pdata->irq_base);
426 twl4030_gpio_irq_base = ret;
427 }
428
429no_irqs:
430 /*
431 * NOTE: boards may waste power if they don't set pullups
432 * and pulldowns correctly ... default for non-ULPI pins is
433 * pulldown, and some other pins may have external pullups
434 * or pulldowns. Careful!
435 */
436 ret = gpio_twl4030_pulls(pdata->pullups, pdata->pulldowns);
437 if (ret)
438 dev_dbg(&pdev->dev, "pullups %.05x %.05x --> %d\n",
439 pdata->pullups, pdata->pulldowns,
440 ret);
441
442 twl_gpiochip.base = pdata->gpio_base;
443 twl_gpiochip.ngpio = TWL4030_GPIO_MAX;
444 twl_gpiochip.dev = &pdev->dev;
445
446 /* NOTE: we assume VIBRA_CTL.VIBRA_EN, in MODULE_AUDIO_VOICE,
447 * is (still) clear if use_leds is set.
448 */
449 if (pdata->use_leds)
450 twl_gpiochip.ngpio += 2;
451
452 ret = gpiochip_add(&twl_gpiochip);
453 if (ret < 0) {
454 dev_err(&pdev->dev,
455 "could not register gpiochip, %d\n",
456 ret);
457 twl_gpiochip.ngpio = 0;
458 gpio_twl4030_remove(pdev);
459 } else if (pdata->setup) {
460 int status;
461
462 status = pdata->setup(&pdev->dev,
463 pdata->gpio_base, TWL4030_GPIO_MAX);
464 if (status)
465 dev_dbg(&pdev->dev, "setup --> %d\n", status);
466 }
467
468 return ret;
469}
470
471static int __devexit gpio_twl4030_remove(struct platform_device *pdev)
472{
473 struct twl4030_gpio_platform_data *pdata = pdev->dev.platform_data;
474 int status;
475
476 if (pdata->teardown) {
477 status = pdata->teardown(&pdev->dev,
478 pdata->gpio_base, TWL4030_GPIO_MAX);
479 if (status) {
480 dev_dbg(&pdev->dev, "teardown --> %d\n", status);
481 return status;
482 }
483 }
484
485 status = gpiochip_remove(&twl_gpiochip);
486 if (status < 0)
487 return status;
488
489 if (is_module())
490 return 0;
491
492 /* REVISIT no support yet for deregistering all the IRQs */
493 WARN_ON(1);
494 return -EIO;
495}
496
497/* Note: this hardware lives inside an I2C-based multi-function device. */
498MODULE_ALIAS("platform:twl4030_gpio");
499
/* Platform driver glue; the matching platform device is created by the
 * twl4030 multi-function core (see the MODULE_ALIAS above).
 */
static struct platform_driver gpio_twl4030_driver = {
	.driver.name	= "twl4030_gpio",
	.driver.owner	= THIS_MODULE,
	.probe		= gpio_twl4030_probe,
	.remove		= __devexit_p(gpio_twl4030_remove),
};
506
/* Register the driver early (subsys_initcall) so these GPIOs exist
 * before ordinary driver probes that may depend on them run.
 */
static int __init gpio_twl4030_init(void)
{
	return platform_driver_register(&gpio_twl4030_driver);
}
subsys_initcall(gpio_twl4030_init);
512
/* Module unload: unregister the platform driver. */
static void __exit gpio_twl4030_exit(void)
{
	platform_driver_unregister(&gpio_twl4030_driver);
}
module_exit(gpio_twl4030_exit);
518
519MODULE_AUTHOR("Texas Instruments, Inc.");
520MODULE_DESCRIPTION("GPIO interface for TWL4030");
521MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/drm_drawable.c b/drivers/gpu/drm/drm_drawable.c
index 1839c57663c5..80be1cab62af 100644
--- a/drivers/gpu/drm/drm_drawable.c
+++ b/drivers/gpu/drm/drm_drawable.c
@@ -76,11 +76,18 @@ int drm_rmdraw(struct drm_device *dev, void *data, struct drm_file *file_priv)
76{ 76{
77 struct drm_draw *draw = data; 77 struct drm_draw *draw = data;
78 unsigned long irqflags; 78 unsigned long irqflags;
79 struct drm_drawable_info *info;
79 80
80 spin_lock_irqsave(&dev->drw_lock, irqflags); 81 spin_lock_irqsave(&dev->drw_lock, irqflags);
81 82
82 drm_free(drm_get_drawable_info(dev, draw->handle), 83 info = drm_get_drawable_info(dev, draw->handle);
83 sizeof(struct drm_drawable_info), DRM_MEM_BUFS); 84 if (info == NULL) {
85 spin_unlock_irqrestore(&dev->drw_lock, irqflags);
86 return -EINVAL;
87 }
88 drm_free(info->rects, info->num_rects * sizeof(struct drm_clip_rect),
89 DRM_MEM_BUFS);
90 drm_free(info, sizeof(struct drm_drawable_info), DRM_MEM_BUFS);
84 91
85 idr_remove(&dev->drw_idr, draw->handle); 92 idr_remove(&dev->drw_idr, draw->handle);
86 93
@@ -111,7 +118,9 @@ int drm_update_drawable_info(struct drm_device *dev, void *data, struct drm_file
111 118
112 switch (update->type) { 119 switch (update->type) {
113 case DRM_DRAWABLE_CLIPRECTS: 120 case DRM_DRAWABLE_CLIPRECTS:
114 if (update->num != info->num_rects) { 121 if (update->num == 0)
122 rects = NULL;
123 else if (update->num != info->num_rects) {
115 rects = drm_alloc(update->num * sizeof(struct drm_clip_rect), 124 rects = drm_alloc(update->num * sizeof(struct drm_clip_rect),
116 DRM_MEM_BUFS); 125 DRM_MEM_BUFS);
117 } else 126 } else
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index 90f5a8d9bdcb..920b72fbc958 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -64,6 +64,8 @@
64#define DRM_IOCTL_SG_ALLOC32 DRM_IOW( 0x38, drm_scatter_gather32_t) 64#define DRM_IOCTL_SG_ALLOC32 DRM_IOW( 0x38, drm_scatter_gather32_t)
65#define DRM_IOCTL_SG_FREE32 DRM_IOW( 0x39, drm_scatter_gather32_t) 65#define DRM_IOCTL_SG_FREE32 DRM_IOW( 0x39, drm_scatter_gather32_t)
66 66
67#define DRM_IOCTL_UPDATE_DRAW32 DRM_IOW( 0x3f, drm_update_draw32_t)
68
67#define DRM_IOCTL_WAIT_VBLANK32 DRM_IOWR(0x3a, drm_wait_vblank32_t) 69#define DRM_IOCTL_WAIT_VBLANK32 DRM_IOWR(0x3a, drm_wait_vblank32_t)
68 70
69typedef struct drm_version_32 { 71typedef struct drm_version_32 {
@@ -952,6 +954,37 @@ static int compat_drm_sg_free(struct file *file, unsigned int cmd,
952 DRM_IOCTL_SG_FREE, (unsigned long)request); 954 DRM_IOCTL_SG_FREE, (unsigned long)request);
953} 955}
954 956
957typedef struct drm_update_draw32 {
958 drm_drawable_t handle;
959 unsigned int type;
960 unsigned int num;
961 /* 64-bit version has a 32-bit pad here */
962 u64 data; /**< Pointer */
963} __attribute__((packed)) drm_update_draw32_t;
964
965static int compat_drm_update_draw(struct file *file, unsigned int cmd,
966 unsigned long arg)
967{
968 drm_update_draw32_t update32;
969 struct drm_update_draw __user *request;
970 int err;
971
972 if (copy_from_user(&update32, (void __user *)arg, sizeof(update32)))
973 return -EFAULT;
974
975 request = compat_alloc_user_space(sizeof(*request));
976 if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) ||
977 __put_user(update32.handle, &request->handle) ||
978 __put_user(update32.type, &request->type) ||
979 __put_user(update32.num, &request->num) ||
980 __put_user(update32.data, &request->data))
981 return -EFAULT;
982
983 err = drm_ioctl(file->f_path.dentry->d_inode, file,
984 DRM_IOCTL_UPDATE_DRAW, (unsigned long)request);
985 return err;
986}
987
955struct drm_wait_vblank_request32 { 988struct drm_wait_vblank_request32 {
956 enum drm_vblank_seq_type type; 989 enum drm_vblank_seq_type type;
957 unsigned int sequence; 990 unsigned int sequence;
@@ -1033,6 +1066,7 @@ drm_ioctl_compat_t *drm_compat_ioctls[] = {
1033#endif 1066#endif
1034 [DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC32)] = compat_drm_sg_alloc, 1067 [DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC32)] = compat_drm_sg_alloc,
1035 [DRM_IOCTL_NR(DRM_IOCTL_SG_FREE32)] = compat_drm_sg_free, 1068 [DRM_IOCTL_NR(DRM_IOCTL_SG_FREE32)] = compat_drm_sg_free,
1069 [DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW32)] = compat_drm_update_draw,
1036 [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK32)] = compat_drm_wait_vblank, 1070 [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK32)] = compat_drm_wait_vblank,
1037}; 1071};
1038 1072
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 4091b9e291f9..212a94f715b2 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -594,11 +594,14 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
594 goto done; 594 goto done;
595 } 595 }
596 596
597 /* Get a refcount on the vblank, which will be released by
598 * drm_vbl_send_signals().
599 */
597 ret = drm_vblank_get(dev, crtc); 600 ret = drm_vblank_get(dev, crtc);
598 if (ret) { 601 if (ret) {
599 drm_free(vbl_sig, sizeof(struct drm_vbl_sig), 602 drm_free(vbl_sig, sizeof(struct drm_vbl_sig),
600 DRM_MEM_DRIVER); 603 DRM_MEM_DRIVER);
601 return ret; 604 goto done;
602 } 605 }
603 606
604 atomic_inc(&dev->vbl_signal_pending); 607 atomic_inc(&dev->vbl_signal_pending);
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index a4caf95485d7..888159e03d26 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -232,6 +232,7 @@ int drm_lock_take(struct drm_lock_data *lock_data,
232 } 232 }
233 return 0; 233 return 0;
234} 234}
235EXPORT_SYMBOL(drm_lock_take);
235 236
236/** 237/**
237 * This takes a lock forcibly and hands it to context. Should ONLY be used 238 * This takes a lock forcibly and hands it to context. Should ONLY be used
@@ -299,6 +300,7 @@ int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context)
299 wake_up_interruptible(&lock_data->lock_queue); 300 wake_up_interruptible(&lock_data->lock_queue);
300 return 0; 301 return 0;
301} 302}
303EXPORT_SYMBOL(drm_lock_free);
302 304
303/** 305/**
304 * If we get here, it means that the process has called DRM_IOCTL_LOCK 306 * If we get here, it means that the process has called DRM_IOCTL_LOCK
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index db34780edbb2..01de536e0211 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -844,8 +844,11 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
844 * correctly in testing on 945G. 844 * correctly in testing on 945G.
845 * This may be a side effect of MSI having been made available for PEG 845 * This may be a side effect of MSI having been made available for PEG
846 * and the registers being closely associated. 846 * and the registers being closely associated.
847 *
848 * According to chipset errata, on the 965GM, MSI interrupts may
849 * be lost or delayed
847 */ 850 */
848 if (!IS_I945G(dev) && !IS_I945GM(dev)) 851 if (!IS_I945G(dev) && !IS_I945GM(dev) && !IS_I965GM(dev))
849 if (pci_enable_msi(dev->pdev)) 852 if (pci_enable_msi(dev->pdev))
850 DRM_ERROR("failed to enable MSI\n"); 853 DRM_ERROR("failed to enable MSI\n");
851 854
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index eae4ed3956e0..f20ffe17df71 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -90,7 +90,7 @@ struct mem_block {
90typedef struct _drm_i915_vbl_swap { 90typedef struct _drm_i915_vbl_swap {
91 struct list_head head; 91 struct list_head head;
92 drm_drawable_t drw_id; 92 drm_drawable_t drw_id;
93 unsigned int plane; 93 unsigned int pipe;
94 unsigned int sequence; 94 unsigned int sequence;
95} drm_i915_vbl_swap_t; 95} drm_i915_vbl_swap_t;
96 96
@@ -240,6 +240,9 @@ typedef struct drm_i915_private {
240 u8 saveDACDATA[256*3]; /* 256 3-byte colors */ 240 u8 saveDACDATA[256*3]; /* 256 3-byte colors */
241 u8 saveCR[37]; 241 u8 saveCR[37];
242 242
243 /** Work task for vblank-related ring access */
244 struct work_struct vblank_work;
245
243 struct { 246 struct {
244 struct drm_mm gtt_space; 247 struct drm_mm gtt_space;
245 248
@@ -285,9 +288,6 @@ typedef struct drm_i915_private {
285 */ 288 */
286 struct delayed_work retire_work; 289 struct delayed_work retire_work;
287 290
288 /** Work task for vblank-related ring access */
289 struct work_struct vblank_work;
290
291 uint32_t next_gem_seqno; 291 uint32_t next_gem_seqno;
292 292
293 /** 293 /**
@@ -441,7 +441,7 @@ extern int i915_irq_wait(struct drm_device *dev, void *data,
441void i915_user_irq_get(struct drm_device *dev); 441void i915_user_irq_get(struct drm_device *dev);
442void i915_user_irq_put(struct drm_device *dev); 442void i915_user_irq_put(struct drm_device *dev);
443 443
444extern void i915_gem_vblank_work_handler(struct work_struct *work); 444extern void i915_vblank_work_handler(struct work_struct *work);
445extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS); 445extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
446extern void i915_driver_irq_preinstall(struct drm_device * dev); 446extern void i915_driver_irq_preinstall(struct drm_device * dev);
447extern int i915_driver_irq_postinstall(struct drm_device *dev); 447extern int i915_driver_irq_postinstall(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index dc2e6fdb6ca3..17ae330ff269 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2564,8 +2564,6 @@ i915_gem_load(struct drm_device *dev)
2564 INIT_LIST_HEAD(&dev_priv->mm.request_list); 2564 INIT_LIST_HEAD(&dev_priv->mm.request_list);
2565 INIT_DELAYED_WORK(&dev_priv->mm.retire_work, 2565 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
2566 i915_gem_retire_work_handler); 2566 i915_gem_retire_work_handler);
2567 INIT_WORK(&dev_priv->mm.vblank_work,
2568 i915_gem_vblank_work_handler);
2569 dev_priv->mm.next_gem_seqno = 1; 2567 dev_priv->mm.next_gem_seqno = 1;
2570 2568
2571 i915_gem_detect_bit_6_swizzle(dev); 2569 i915_gem_detect_bit_6_swizzle(dev);
diff --git a/drivers/gpu/drm/i915/i915_gem_proc.c b/drivers/gpu/drm/i915/i915_gem_proc.c
index 15d4160415b0..93de15b4c9a7 100644
--- a/drivers/gpu/drm/i915/i915_gem_proc.c
+++ b/drivers/gpu/drm/i915/i915_gem_proc.c
@@ -192,7 +192,12 @@ static int i915_gem_seqno_info(char *buf, char **start, off_t offset,
192 192
193 *start = &buf[offset]; 193 *start = &buf[offset];
194 *eof = 0; 194 *eof = 0;
195 DRM_PROC_PRINT("Current sequence: %d\n", i915_get_gem_seqno(dev)); 195 if (dev_priv->hw_status_page != NULL) {
196 DRM_PROC_PRINT("Current sequence: %d\n",
197 i915_get_gem_seqno(dev));
198 } else {
199 DRM_PROC_PRINT("Current sequence: hws uninitialized\n");
200 }
196 DRM_PROC_PRINT("Waiter sequence: %d\n", 201 DRM_PROC_PRINT("Waiter sequence: %d\n",
197 dev_priv->mm.waiting_gem_seqno); 202 dev_priv->mm.waiting_gem_seqno);
198 DRM_PROC_PRINT("IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno); 203 DRM_PROC_PRINT("IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno);
@@ -230,8 +235,12 @@ static int i915_interrupt_info(char *buf, char **start, off_t offset,
230 I915_READ(PIPEBSTAT)); 235 I915_READ(PIPEBSTAT));
231 DRM_PROC_PRINT("Interrupts received: %d\n", 236 DRM_PROC_PRINT("Interrupts received: %d\n",
232 atomic_read(&dev_priv->irq_received)); 237 atomic_read(&dev_priv->irq_received));
233 DRM_PROC_PRINT("Current sequence: %d\n", 238 if (dev_priv->hw_status_page != NULL) {
234 i915_get_gem_seqno(dev)); 239 DRM_PROC_PRINT("Current sequence: %d\n",
240 i915_get_gem_seqno(dev));
241 } else {
242 DRM_PROC_PRINT("Current sequence: hws uninitialized\n");
243 }
235 DRM_PROC_PRINT("Waiter sequence: %d\n", 244 DRM_PROC_PRINT("Waiter sequence: %d\n",
236 dev_priv->mm.waiting_gem_seqno); 245 dev_priv->mm.waiting_gem_seqno);
237 DRM_PROC_PRINT("IRQ sequence: %d\n", 246 DRM_PROC_PRINT("IRQ sequence: %d\n",
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index baae511c785b..26f48932a51e 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -60,43 +60,6 @@ i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
60} 60}
61 61
62/** 62/**
63 * i915_get_pipe - return the the pipe associated with a given plane
64 * @dev: DRM device
65 * @plane: plane to look for
66 *
67 * The Intel Mesa & 2D drivers call the vblank routines with a plane number
68 * rather than a pipe number, since they may not always be equal. This routine
69 * maps the given @plane back to a pipe number.
70 */
71static int
72i915_get_pipe(struct drm_device *dev, int plane)
73{
74 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
75 u32 dspcntr;
76
77 dspcntr = plane ? I915_READ(DSPBCNTR) : I915_READ(DSPACNTR);
78
79 return dspcntr & DISPPLANE_SEL_PIPE_MASK ? 1 : 0;
80}
81
82/**
83 * i915_get_plane - return the the plane associated with a given pipe
84 * @dev: DRM device
85 * @pipe: pipe to look for
86 *
87 * The Intel Mesa & 2D drivers call the vblank routines with a plane number
88 * rather than a plane number, since they may not always be equal. This routine
89 * maps the given @pipe back to a plane number.
90 */
91static int
92i915_get_plane(struct drm_device *dev, int pipe)
93{
94 if (i915_get_pipe(dev, 0) == pipe)
95 return 0;
96 return 1;
97}
98
99/**
100 * i915_pipe_enabled - check if a pipe is enabled 63 * i915_pipe_enabled - check if a pipe is enabled
101 * @dev: DRM device 64 * @dev: DRM device
102 * @pipe: pipe to check 65 * @pipe: pipe to check
@@ -121,6 +84,9 @@ i915_pipe_enabled(struct drm_device *dev, int pipe)
121 * Emit blits for scheduled buffer swaps. 84 * Emit blits for scheduled buffer swaps.
122 * 85 *
123 * This function will be called with the HW lock held. 86 * This function will be called with the HW lock held.
87 * Because this function must grab the ring mutex (dev->struct_mutex),
88 * it can no longer run at soft irq time. We'll fix this when we do
89 * the DRI2 swap buffer work.
124 */ 90 */
125static void i915_vblank_tasklet(struct drm_device *dev) 91static void i915_vblank_tasklet(struct drm_device *dev)
126{ 92{
@@ -141,6 +107,8 @@ static void i915_vblank_tasklet(struct drm_device *dev)
141 u32 ropcpp = (0xcc << 16) | ((cpp - 1) << 24); 107 u32 ropcpp = (0xcc << 16) | ((cpp - 1) << 24);
142 RING_LOCALS; 108 RING_LOCALS;
143 109
110 mutex_lock(&dev->struct_mutex);
111
144 if (IS_I965G(dev) && sarea_priv->front_tiled) { 112 if (IS_I965G(dev) && sarea_priv->front_tiled) {
145 cmd |= XY_SRC_COPY_BLT_DST_TILED; 113 cmd |= XY_SRC_COPY_BLT_DST_TILED;
146 dst_pitch >>= 2; 114 dst_pitch >>= 2;
@@ -165,7 +133,7 @@ static void i915_vblank_tasklet(struct drm_device *dev)
165 list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) { 133 list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) {
166 drm_i915_vbl_swap_t *vbl_swap = 134 drm_i915_vbl_swap_t *vbl_swap =
167 list_entry(list, drm_i915_vbl_swap_t, head); 135 list_entry(list, drm_i915_vbl_swap_t, head);
168 int pipe = i915_get_pipe(dev, vbl_swap->plane); 136 int pipe = vbl_swap->pipe;
169 137
170 if ((counter[pipe] - vbl_swap->sequence) > (1<<23)) 138 if ((counter[pipe] - vbl_swap->sequence) > (1<<23))
171 continue; 139 continue;
@@ -179,20 +147,19 @@ static void i915_vblank_tasklet(struct drm_device *dev)
179 147
180 drw = drm_get_drawable_info(dev, vbl_swap->drw_id); 148 drw = drm_get_drawable_info(dev, vbl_swap->drw_id);
181 149
182 if (!drw) {
183 spin_unlock(&dev->drw_lock);
184 drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
185 spin_lock(&dev_priv->swaps_lock);
186 continue;
187 }
188
189 list_for_each(hit, &hits) { 150 list_for_each(hit, &hits) {
190 drm_i915_vbl_swap_t *swap_cmp = 151 drm_i915_vbl_swap_t *swap_cmp =
191 list_entry(hit, drm_i915_vbl_swap_t, head); 152 list_entry(hit, drm_i915_vbl_swap_t, head);
192 struct drm_drawable_info *drw_cmp = 153 struct drm_drawable_info *drw_cmp =
193 drm_get_drawable_info(dev, swap_cmp->drw_id); 154 drm_get_drawable_info(dev, swap_cmp->drw_id);
194 155
195 if (drw_cmp && 156 /* Make sure both drawables are still
157 * around and have some rectangles before
158 * we look inside to order them for the
159 * blts below.
160 */
161 if (drw_cmp && drw_cmp->num_rects > 0 &&
162 drw && drw->num_rects > 0 &&
196 drw_cmp->rects[0].y1 > drw->rects[0].y1) { 163 drw_cmp->rects[0].y1 > drw->rects[0].y1) {
197 list_add_tail(list, hit); 164 list_add_tail(list, hit);
198 break; 165 break;
@@ -212,6 +179,7 @@ static void i915_vblank_tasklet(struct drm_device *dev)
212 179
213 if (nhits == 0) { 180 if (nhits == 0) {
214 spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags); 181 spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
182 mutex_unlock(&dev->struct_mutex);
215 return; 183 return;
216 } 184 }
217 185
@@ -265,18 +233,21 @@ static void i915_vblank_tasklet(struct drm_device *dev)
265 drm_i915_vbl_swap_t *swap_hit = 233 drm_i915_vbl_swap_t *swap_hit =
266 list_entry(hit, drm_i915_vbl_swap_t, head); 234 list_entry(hit, drm_i915_vbl_swap_t, head);
267 struct drm_clip_rect *rect; 235 struct drm_clip_rect *rect;
268 int num_rects, plane; 236 int num_rects, pipe;
269 unsigned short top, bottom; 237 unsigned short top, bottom;
270 238
271 drw = drm_get_drawable_info(dev, swap_hit->drw_id); 239 drw = drm_get_drawable_info(dev, swap_hit->drw_id);
272 240
241 /* The drawable may have been destroyed since
242 * the vblank swap was queued
243 */
273 if (!drw) 244 if (!drw)
274 continue; 245 continue;
275 246
276 rect = drw->rects; 247 rect = drw->rects;
277 plane = swap_hit->plane; 248 pipe = swap_hit->pipe;
278 top = upper[plane]; 249 top = upper[pipe];
279 bottom = lower[plane]; 250 bottom = lower[pipe];
280 251
281 for (num_rects = drw->num_rects; num_rects--; rect++) { 252 for (num_rects = drw->num_rects; num_rects--; rect++) {
282 int y1 = max(rect->y1, top); 253 int y1 = max(rect->y1, top);
@@ -302,6 +273,7 @@ static void i915_vblank_tasklet(struct drm_device *dev)
302 } 273 }
303 274
304 spin_unlock_irqrestore(&dev->drw_lock, irqflags); 275 spin_unlock_irqrestore(&dev->drw_lock, irqflags);
276 mutex_unlock(&dev->struct_mutex);
305 277
306 list_for_each_safe(hit, tmp, &hits) { 278 list_for_each_safe(hit, tmp, &hits) {
307 drm_i915_vbl_swap_t *swap_hit = 279 drm_i915_vbl_swap_t *swap_hit =
@@ -313,15 +285,16 @@ static void i915_vblank_tasklet(struct drm_device *dev)
313 } 285 }
314} 286}
315 287
316u32 i915_get_vblank_counter(struct drm_device *dev, int plane) 288/* Called from drm generic code, passed a 'crtc', which
289 * we use as a pipe index
290 */
291u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
317{ 292{
318 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 293 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
319 unsigned long high_frame; 294 unsigned long high_frame;
320 unsigned long low_frame; 295 unsigned long low_frame;
321 u32 high1, high2, low, count; 296 u32 high1, high2, low, count;
322 int pipe;
323 297
324 pipe = i915_get_pipe(dev, plane);
325 high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH; 298 high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
326 low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL; 299 low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
327 300
@@ -350,18 +323,37 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int plane)
350} 323}
351 324
352void 325void
353i915_gem_vblank_work_handler(struct work_struct *work) 326i915_vblank_work_handler(struct work_struct *work)
354{ 327{
355 drm_i915_private_t *dev_priv; 328 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
356 struct drm_device *dev; 329 vblank_work);
330 struct drm_device *dev = dev_priv->dev;
331 unsigned long irqflags;
357 332
358 dev_priv = container_of(work, drm_i915_private_t, 333 if (dev->lock.hw_lock == NULL) {
359 mm.vblank_work); 334 i915_vblank_tasklet(dev);
360 dev = dev_priv->dev; 335 return;
336 }
337
338 spin_lock_irqsave(&dev->tasklet_lock, irqflags);
339 dev->locked_tasklet_func = i915_vblank_tasklet;
340 spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
341
342 /* Try to get the lock now, if this fails, the lock
343 * holder will execute the tasklet during unlock
344 */
345 if (!drm_lock_take(&dev->lock, DRM_KERNEL_CONTEXT))
346 return;
347
348 dev->lock.lock_time = jiffies;
349 atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
350
351 spin_lock_irqsave(&dev->tasklet_lock, irqflags);
352 dev->locked_tasklet_func = NULL;
353 spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
361 354
362 mutex_lock(&dev->struct_mutex);
363 i915_vblank_tasklet(dev); 355 i915_vblank_tasklet(dev);
364 mutex_unlock(&dev->struct_mutex); 356 drm_lock_free(&dev->lock, DRM_KERNEL_CONTEXT);
365} 357}
366 358
367irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) 359irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
@@ -398,7 +390,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
398 else if (pipea_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS| 390 else if (pipea_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
399 PIPE_VBLANK_INTERRUPT_STATUS)) { 391 PIPE_VBLANK_INTERRUPT_STATUS)) {
400 vblank++; 392 vblank++;
401 drm_handle_vblank(dev, i915_get_plane(dev, 0)); 393 drm_handle_vblank(dev, 0);
402 } 394 }
403 395
404 I915_WRITE(PIPEASTAT, pipea_stats); 396 I915_WRITE(PIPEASTAT, pipea_stats);
@@ -416,7 +408,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
416 else if (pipeb_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS| 408 else if (pipeb_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
417 PIPE_VBLANK_INTERRUPT_STATUS)) { 409 PIPE_VBLANK_INTERRUPT_STATUS)) {
418 vblank++; 410 vblank++;
419 drm_handle_vblank(dev, i915_get_plane(dev, 1)); 411 drm_handle_vblank(dev, 1);
420 } 412 }
421 413
422 if (pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) 414 if (pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS)
@@ -441,12 +433,8 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
441 if (iir & I915_ASLE_INTERRUPT) 433 if (iir & I915_ASLE_INTERRUPT)
442 opregion_asle_intr(dev); 434 opregion_asle_intr(dev);
443 435
444 if (vblank && dev_priv->swaps_pending > 0) { 436 if (vblank && dev_priv->swaps_pending > 0)
445 if (dev_priv->ring.ring_obj == NULL) 437 schedule_work(&dev_priv->vblank_work);
446 drm_locked_tasklet(dev, i915_vblank_tasklet);
447 else
448 schedule_work(&dev_priv->mm.vblank_work);
449 }
450 438
451 return IRQ_HANDLED; 439 return IRQ_HANDLED;
452} 440}
@@ -481,22 +469,24 @@ static int i915_emit_irq(struct drm_device * dev)
481void i915_user_irq_get(struct drm_device *dev) 469void i915_user_irq_get(struct drm_device *dev)
482{ 470{
483 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 471 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
472 unsigned long irqflags;
484 473
485 spin_lock(&dev_priv->user_irq_lock); 474 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
486 if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) 475 if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1))
487 i915_enable_irq(dev_priv, I915_USER_INTERRUPT); 476 i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
488 spin_unlock(&dev_priv->user_irq_lock); 477 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
489} 478}
490 479
491void i915_user_irq_put(struct drm_device *dev) 480void i915_user_irq_put(struct drm_device *dev)
492{ 481{
493 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 482 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
483 unsigned long irqflags;
494 484
495 spin_lock(&dev_priv->user_irq_lock); 485 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
496 BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0); 486 BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
497 if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) 487 if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0))
498 i915_disable_irq(dev_priv, I915_USER_INTERRUPT); 488 i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
499 spin_unlock(&dev_priv->user_irq_lock); 489 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
500} 490}
501 491
502static int i915_wait_irq(struct drm_device * dev, int irq_nr) 492static int i915_wait_irq(struct drm_device * dev, int irq_nr)
@@ -578,74 +568,95 @@ int i915_irq_wait(struct drm_device *dev, void *data,
578 return i915_wait_irq(dev, irqwait->irq_seq); 568 return i915_wait_irq(dev, irqwait->irq_seq);
579} 569}
580 570
581int i915_enable_vblank(struct drm_device *dev, int plane) 571/* Called from drm generic code, passed 'crtc' which
572 * we use as a pipe index
573 */
574int i915_enable_vblank(struct drm_device *dev, int pipe)
582{ 575{
583 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 576 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
584 int pipe = i915_get_pipe(dev, plane);
585 u32 pipestat_reg = 0; 577 u32 pipestat_reg = 0;
586 u32 pipestat; 578 u32 pipestat;
579 u32 interrupt = 0;
580 unsigned long irqflags;
587 581
588 switch (pipe) { 582 switch (pipe) {
589 case 0: 583 case 0:
590 pipestat_reg = PIPEASTAT; 584 pipestat_reg = PIPEASTAT;
591 i915_enable_irq(dev_priv, I915_DISPLAY_PIPE_A_EVENT_INTERRUPT); 585 interrupt = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
592 break; 586 break;
593 case 1: 587 case 1:
594 pipestat_reg = PIPEBSTAT; 588 pipestat_reg = PIPEBSTAT;
595 i915_enable_irq(dev_priv, I915_DISPLAY_PIPE_B_EVENT_INTERRUPT); 589 interrupt = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
596 break; 590 break;
597 default: 591 default:
598 DRM_ERROR("tried to enable vblank on non-existent pipe %d\n", 592 DRM_ERROR("tried to enable vblank on non-existent pipe %d\n",
599 pipe); 593 pipe);
600 break; 594 return 0;
601 } 595 }
602 596
603 if (pipestat_reg) { 597 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
604 pipestat = I915_READ(pipestat_reg); 598 /* Enabling vblank events in IMR comes before PIPESTAT write, or
605 if (IS_I965G(dev)) 599 * there's a race where the PIPESTAT vblank bit gets set to 1, so
606 pipestat |= PIPE_START_VBLANK_INTERRUPT_ENABLE; 600 * the OR of enabled PIPESTAT bits goes to 1, so the PIPExEVENT in
607 else 601 * ISR flashes to 1, but the IIR bit doesn't get set to 1 because
608 pipestat |= PIPE_VBLANK_INTERRUPT_ENABLE; 602 * IMR masks it. It doesn't ever get set after we clear the masking
609 /* Clear any stale interrupt status */ 603 * in IMR because the ISR bit is edge, not level-triggered, on the
610 pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS | 604 * OR of PIPESTAT bits.
611 PIPE_VBLANK_INTERRUPT_STATUS); 605 */
612 I915_WRITE(pipestat_reg, pipestat); 606 i915_enable_irq(dev_priv, interrupt);
613 } 607 pipestat = I915_READ(pipestat_reg);
608 if (IS_I965G(dev))
609 pipestat |= PIPE_START_VBLANK_INTERRUPT_ENABLE;
610 else
611 pipestat |= PIPE_VBLANK_INTERRUPT_ENABLE;
612 /* Clear any stale interrupt status */
613 pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS |
614 PIPE_VBLANK_INTERRUPT_STATUS);
615 I915_WRITE(pipestat_reg, pipestat);
616 (void) I915_READ(pipestat_reg); /* Posting read */
617 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
614 618
615 return 0; 619 return 0;
616} 620}
617 621
618void i915_disable_vblank(struct drm_device *dev, int plane) 622/* Called from drm generic code, passed 'crtc' which
623 * we use as a pipe index
624 */
625void i915_disable_vblank(struct drm_device *dev, int pipe)
619{ 626{
620 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 627 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
621 int pipe = i915_get_pipe(dev, plane);
622 u32 pipestat_reg = 0; 628 u32 pipestat_reg = 0;
623 u32 pipestat; 629 u32 pipestat;
630 u32 interrupt = 0;
631 unsigned long irqflags;
624 632
625 switch (pipe) { 633 switch (pipe) {
626 case 0: 634 case 0:
627 pipestat_reg = PIPEASTAT; 635 pipestat_reg = PIPEASTAT;
628 i915_disable_irq(dev_priv, I915_DISPLAY_PIPE_A_EVENT_INTERRUPT); 636 interrupt = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
629 break; 637 break;
630 case 1: 638 case 1:
631 pipestat_reg = PIPEBSTAT; 639 pipestat_reg = PIPEBSTAT;
632 i915_disable_irq(dev_priv, I915_DISPLAY_PIPE_B_EVENT_INTERRUPT); 640 interrupt = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
633 break; 641 break;
634 default: 642 default:
635 DRM_ERROR("tried to disable vblank on non-existent pipe %d\n", 643 DRM_ERROR("tried to disable vblank on non-existent pipe %d\n",
636 pipe); 644 pipe);
645 return;
637 break; 646 break;
638 } 647 }
639 648
640 if (pipestat_reg) { 649 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
641 pipestat = I915_READ(pipestat_reg); 650 i915_disable_irq(dev_priv, interrupt);
642 pipestat &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE | 651 pipestat = I915_READ(pipestat_reg);
643 PIPE_VBLANK_INTERRUPT_ENABLE); 652 pipestat &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE |
644 /* Clear any stale interrupt status */ 653 PIPE_VBLANK_INTERRUPT_ENABLE);
645 pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS | 654 /* Clear any stale interrupt status */
646 PIPE_VBLANK_INTERRUPT_STATUS); 655 pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS |
647 I915_WRITE(pipestat_reg, pipestat); 656 PIPE_VBLANK_INTERRUPT_STATUS);
648 } 657 I915_WRITE(pipestat_reg, pipestat);
658 (void) I915_READ(pipestat_reg); /* Posting read */
659 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
649} 660}
650 661
651/* Set the vblank monitor pipe 662/* Set the vblank monitor pipe
@@ -687,8 +698,8 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
687{ 698{
688 drm_i915_private_t *dev_priv = dev->dev_private; 699 drm_i915_private_t *dev_priv = dev->dev_private;
689 drm_i915_vblank_swap_t *swap = data; 700 drm_i915_vblank_swap_t *swap = data;
690 drm_i915_vbl_swap_t *vbl_swap; 701 drm_i915_vbl_swap_t *vbl_swap, *vbl_old;
691 unsigned int pipe, seqtype, curseq, plane; 702 unsigned int pipe, seqtype, curseq;
692 unsigned long irqflags; 703 unsigned long irqflags;
693 struct list_head *list; 704 struct list_head *list;
694 int ret; 705 int ret;
@@ -709,8 +720,7 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
709 return -EINVAL; 720 return -EINVAL;
710 } 721 }
711 722
712 plane = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0; 723 pipe = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0;
713 pipe = i915_get_pipe(dev, plane);
714 724
715 seqtype = swap->seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE); 725 seqtype = swap->seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE);
716 726
@@ -751,44 +761,52 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
751 } 761 }
752 } 762 }
753 763
764 vbl_swap = drm_calloc(1, sizeof(*vbl_swap), DRM_MEM_DRIVER);
765
766 if (!vbl_swap) {
767 DRM_ERROR("Failed to allocate memory to queue swap\n");
768 drm_vblank_put(dev, pipe);
769 return -ENOMEM;
770 }
771
772 vbl_swap->drw_id = swap->drawable;
773 vbl_swap->pipe = pipe;
774 vbl_swap->sequence = swap->sequence;
775
754 spin_lock_irqsave(&dev_priv->swaps_lock, irqflags); 776 spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
755 777
756 list_for_each(list, &dev_priv->vbl_swaps.head) { 778 list_for_each(list, &dev_priv->vbl_swaps.head) {
757 vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head); 779 vbl_old = list_entry(list, drm_i915_vbl_swap_t, head);
758 780
759 if (vbl_swap->drw_id == swap->drawable && 781 if (vbl_old->drw_id == swap->drawable &&
760 vbl_swap->plane == plane && 782 vbl_old->pipe == pipe &&
761 vbl_swap->sequence == swap->sequence) { 783 vbl_old->sequence == swap->sequence) {
762 spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags); 784 spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
785 drm_vblank_put(dev, pipe);
786 drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
763 DRM_DEBUG("Already scheduled\n"); 787 DRM_DEBUG("Already scheduled\n");
764 return 0; 788 return 0;
765 } 789 }
766 } 790 }
767 791
768 spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags); 792 if (dev_priv->swaps_pending >= 10) {
769
770 if (dev_priv->swaps_pending >= 100) {
771 DRM_DEBUG("Too many swaps queued\n"); 793 DRM_DEBUG("Too many swaps queued\n");
794 DRM_DEBUG(" pipe 0: %d pipe 1: %d\n",
795 drm_vblank_count(dev, 0),
796 drm_vblank_count(dev, 1));
797
798 list_for_each(list, &dev_priv->vbl_swaps.head) {
799 vbl_old = list_entry(list, drm_i915_vbl_swap_t, head);
800 DRM_DEBUG("\tdrw %x pipe %d seq %x\n",
801 vbl_old->drw_id, vbl_old->pipe,
802 vbl_old->sequence);
803 }
804 spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
772 drm_vblank_put(dev, pipe); 805 drm_vblank_put(dev, pipe);
806 drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
773 return -EBUSY; 807 return -EBUSY;
774 } 808 }
775 809
776 vbl_swap = drm_calloc(1, sizeof(*vbl_swap), DRM_MEM_DRIVER);
777
778 if (!vbl_swap) {
779 DRM_ERROR("Failed to allocate memory to queue swap\n");
780 drm_vblank_put(dev, pipe);
781 return -ENOMEM;
782 }
783
784 DRM_DEBUG("\n");
785
786 vbl_swap->drw_id = swap->drawable;
787 vbl_swap->plane = plane;
788 vbl_swap->sequence = swap->sequence;
789
790 spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
791
792 list_add_tail(&vbl_swap->head, &dev_priv->vbl_swaps.head); 810 list_add_tail(&vbl_swap->head, &dev_priv->vbl_swaps.head);
793 dev_priv->swaps_pending++; 811 dev_priv->swaps_pending++;
794 812
@@ -815,6 +833,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
815 833
816 spin_lock_init(&dev_priv->swaps_lock); 834 spin_lock_init(&dev_priv->swaps_lock);
817 INIT_LIST_HEAD(&dev_priv->vbl_swaps.head); 835 INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
836 INIT_WORK(&dev_priv->vblank_work, i915_vblank_work_handler);
818 dev_priv->swaps_pending = 0; 837 dev_priv->swaps_pending = 0;
819 838
820 /* Set initial unmasked IRQs to just the selected vblank pipes. */ 839 /* Set initial unmasked IRQs to just the selected vblank pipes. */
diff --git a/drivers/i2c/algos/i2c-algo-pcf.c b/drivers/i2c/algos/i2c-algo-pcf.c
index 1e328d19cd6d..3e01992230b8 100644
--- a/drivers/i2c/algos/i2c-algo-pcf.c
+++ b/drivers/i2c/algos/i2c-algo-pcf.c
@@ -135,7 +135,7 @@ static int wait_for_pin(struct i2c_algo_pcf_data *adap, int *status) {
135 *status = get_pcf(adap, 1); 135 *status = get_pcf(adap, 1);
136#ifndef STUB_I2C 136#ifndef STUB_I2C
137 while (timeout-- && (*status & I2C_PCF_PIN)) { 137 while (timeout-- && (*status & I2C_PCF_PIN)) {
138 adap->waitforpin(); 138 adap->waitforpin(adap->data);
139 *status = get_pcf(adap, 1); 139 *status = get_pcf(adap, 1);
140 } 140 }
141 if (*status & I2C_PCF_LAB) { 141 if (*status & I2C_PCF_LAB) {
@@ -208,7 +208,7 @@ static int pcf_init_8584 (struct i2c_algo_pcf_data *adap)
208 return -ENXIO; 208 return -ENXIO;
209 } 209 }
210 210
211 printk(KERN_DEBUG "i2c-algo-pcf.o: deteted and initialized PCF8584.\n"); 211 printk(KERN_DEBUG "i2c-algo-pcf.o: detected and initialized PCF8584.\n");
212 212
213 return 0; 213 return 0;
214} 214}
@@ -331,13 +331,16 @@ static int pcf_xfer(struct i2c_adapter *i2c_adap,
331 int i; 331 int i;
332 int ret=0, timeout, status; 332 int ret=0, timeout, status;
333 333
334 if (adap->xfer_begin)
335 adap->xfer_begin(adap->data);
334 336
335 /* Check for bus busy */ 337 /* Check for bus busy */
336 timeout = wait_for_bb(adap); 338 timeout = wait_for_bb(adap);
337 if (timeout) { 339 if (timeout) {
338 DEB2(printk(KERN_ERR "i2c-algo-pcf.o: " 340 DEB2(printk(KERN_ERR "i2c-algo-pcf.o: "
339 "Timeout waiting for BB in pcf_xfer\n");) 341 "Timeout waiting for BB in pcf_xfer\n");)
340 return -EIO; 342 i = -EIO;
343 goto out;
341 } 344 }
342 345
343 for (i = 0;ret >= 0 && i < num; i++) { 346 for (i = 0;ret >= 0 && i < num; i++) {
@@ -359,12 +362,14 @@ static int pcf_xfer(struct i2c_adapter *i2c_adap,
359 if (timeout) { 362 if (timeout) {
360 if (timeout == -EINTR) { 363 if (timeout == -EINTR) {
361 /* arbitration lost */ 364 /* arbitration lost */
362 return (-EINTR); 365 i = -EINTR;
366 goto out;
363 } 367 }
364 i2c_stop(adap); 368 i2c_stop(adap);
365 DEB2(printk(KERN_ERR "i2c-algo-pcf.o: Timeout waiting " 369 DEB2(printk(KERN_ERR "i2c-algo-pcf.o: Timeout waiting "
366 "for PIN(1) in pcf_xfer\n");) 370 "for PIN(1) in pcf_xfer\n");)
367 return (-EREMOTEIO); 371 i = -EREMOTEIO;
372 goto out;
368 } 373 }
369 374
370#ifndef STUB_I2C 375#ifndef STUB_I2C
@@ -372,7 +377,8 @@ static int pcf_xfer(struct i2c_adapter *i2c_adap,
372 if (status & I2C_PCF_LRB) { 377 if (status & I2C_PCF_LRB) {
373 i2c_stop(adap); 378 i2c_stop(adap);
374 DEB2(printk(KERN_ERR "i2c-algo-pcf.o: No LRB(1) in pcf_xfer\n");) 379 DEB2(printk(KERN_ERR "i2c-algo-pcf.o: No LRB(1) in pcf_xfer\n");)
375 return (-EREMOTEIO); 380 i = -EREMOTEIO;
381 goto out;
376 } 382 }
377#endif 383#endif
378 384
@@ -404,6 +410,9 @@ static int pcf_xfer(struct i2c_adapter *i2c_adap,
404 } 410 }
405 } 411 }
406 412
413out:
414 if (adap->xfer_end)
415 adap->xfer_end(adap->data);
407 return (i); 416 return (i);
408} 417}
409 418
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index acadbc51fc0f..7f95905bbb9d 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -97,6 +97,7 @@ config I2C_I801
97 ICH9 97 ICH9
98 Tolapai 98 Tolapai
99 ICH10 99 ICH10
100 PCH
100 101
101 This driver can also be built as a module. If so, the module 102 This driver can also be built as a module. If so, the module
102 will be called i2c-i801. 103 will be called i2c-i801.
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
index 8164de1f4d72..228f75723063 100644
--- a/drivers/i2c/busses/i2c-cpm.c
+++ b/drivers/i2c/busses/i2c-cpm.c
@@ -423,7 +423,6 @@ static const struct i2c_adapter cpm_ops = {
423 .owner = THIS_MODULE, 423 .owner = THIS_MODULE,
424 .name = "i2c-cpm", 424 .name = "i2c-cpm",
425 .algo = &cpm_i2c_algo, 425 .algo = &cpm_i2c_algo,
426 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
427}; 426};
428 427
429static int __devinit cpm_i2c_setup(struct cpm_i2c *cpm) 428static int __devinit cpm_i2c_setup(struct cpm_i2c *cpm)
diff --git a/drivers/i2c/busses/i2c-elektor.c b/drivers/i2c/busses/i2c-elektor.c
index 7f38c01fb3a0..0ed3ccb81b63 100644
--- a/drivers/i2c/busses/i2c-elektor.c
+++ b/drivers/i2c/busses/i2c-elektor.c
@@ -104,7 +104,8 @@ static int pcf_isa_getclock(void *data)
104 return (clock); 104 return (clock);
105} 105}
106 106
107static void pcf_isa_waitforpin(void) { 107static void pcf_isa_waitforpin(void *data)
108{
108 DEFINE_WAIT(wait); 109 DEFINE_WAIT(wait);
109 int timeout = 2; 110 int timeout = 2;
110 unsigned long flags; 111 unsigned long flags;
diff --git a/drivers/i2c/busses/i2c-hydra.c b/drivers/i2c/busses/i2c-hydra.c
index 1098f21ace13..648aa7baff83 100644
--- a/drivers/i2c/busses/i2c-hydra.c
+++ b/drivers/i2c/busses/i2c-hydra.c
@@ -123,7 +123,7 @@ static int __devinit hydra_probe(struct pci_dev *dev,
123 hydra_adap.name)) 123 hydra_adap.name))
124 return -EBUSY; 124 return -EBUSY;
125 125
126 hydra_bit_data.data = ioremap(base, pci_resource_len(dev, 0)); 126 hydra_bit_data.data = pci_ioremap_bar(dev, 0);
127 if (hydra_bit_data.data == NULL) { 127 if (hydra_bit_data.data == NULL) {
128 release_mem_region(base+offsetof(struct Hydra, CachePD), 4); 128 release_mem_region(base+offsetof(struct Hydra, CachePD), 4);
129 return -ENODEV; 129 return -ENODEV;
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index dc7ea32b69a8..5123eb69a971 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -41,6 +41,7 @@
41 Tolapai 0x5032 32 hard yes yes yes 41 Tolapai 0x5032 32 hard yes yes yes
42 ICH10 0x3a30 32 hard yes yes yes 42 ICH10 0x3a30 32 hard yes yes yes
43 ICH10 0x3a60 32 hard yes yes yes 43 ICH10 0x3a60 32 hard yes yes yes
44 PCH 0x3b30 32 hard yes yes yes
44 45
45 Features supported by this driver: 46 Features supported by this driver:
46 Software PEC no 47 Software PEC no
@@ -576,6 +577,7 @@ static struct pci_device_id i801_ids[] = {
576 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TOLAPAI_1) }, 577 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TOLAPAI_1) },
577 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_4) }, 578 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_4) },
578 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_5) }, 579 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_5) },
580 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PCH_SMBUS) },
579 { 0, } 581 { 0, }
580}; 582};
581 583
@@ -599,6 +601,7 @@ static int __devinit i801_probe(struct pci_dev *dev, const struct pci_device_id
599 case PCI_DEVICE_ID_INTEL_TOLAPAI_1: 601 case PCI_DEVICE_ID_INTEL_TOLAPAI_1:
600 case PCI_DEVICE_ID_INTEL_ICH10_4: 602 case PCI_DEVICE_ID_INTEL_ICH10_4:
601 case PCI_DEVICE_ID_INTEL_ICH10_5: 603 case PCI_DEVICE_ID_INTEL_ICH10_5:
604 case PCI_DEVICE_ID_INTEL_PCH_SMBUS:
602 i801_features |= FEATURE_I2C_BLOCK_READ; 605 i801_features |= FEATURE_I2C_BLOCK_READ;
603 /* fall through */ 606 /* fall through */
604 case PCI_DEVICE_ID_INTEL_82801DB_3: 607 case PCI_DEVICE_ID_INTEL_82801DB_3:
diff --git a/drivers/i2c/chips/Kconfig b/drivers/i2c/chips/Kconfig
index 17356827b93d..4c35702830ce 100644
--- a/drivers/i2c/chips/Kconfig
+++ b/drivers/i2c/chips/Kconfig
@@ -1,6 +1,8 @@
1# 1#
2# Miscellaneous I2C chip drivers configuration 2# Miscellaneous I2C chip drivers configuration
3# 3#
4# *** DEPRECATED! Do not add new entries! See Makefile ***
5#
4 6
5menu "Miscellaneous I2C Chip support" 7menu "Miscellaneous I2C Chip support"
6 8
diff --git a/drivers/i2c/chips/Makefile b/drivers/i2c/chips/Makefile
index ca520fa143d6..23d2a31b0a64 100644
--- a/drivers/i2c/chips/Makefile
+++ b/drivers/i2c/chips/Makefile
@@ -1,7 +1,8 @@
1# 1#
2# Makefile for miscellaneous I2C chip drivers. 2# Makefile for miscellaneous I2C chip drivers.
3# 3#
4# Think twice before you add a new driver to this directory. 4# Do not add new drivers to this directory! It is DEPRECATED.
5#
5# Device drivers are better grouped according to the functionality they 6# Device drivers are better grouped according to the functionality they
6# implement rather than to the bus they are connected to. In particular: 7# implement rather than to the bus they are connected to. In particular:
7# * Hardware monitoring chip drivers go to drivers/hwmon 8# * Hardware monitoring chip drivers go to drivers/hwmon
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 42e852d79ffa..5a485c22660a 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -266,6 +266,9 @@ i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
266 266
267 client->dev.platform_data = info->platform_data; 267 client->dev.platform_data = info->platform_data;
268 268
269 if (info->archdata)
270 client->dev.archdata = *info->archdata;
271
269 client->flags = info->flags; 272 client->flags = info->flags;
270 client->addr = info->addr; 273 client->addr = info->addr;
271 client->irq = info->irq; 274 client->irq = info->irq;
diff --git a/drivers/ide/Makefile b/drivers/ide/Makefile
index 093d3248ca89..9cf92ac939d2 100644
--- a/drivers/ide/Makefile
+++ b/drivers/ide/Makefile
@@ -18,22 +18,66 @@ ide-core-$(CONFIG_BLK_DEV_IDEACPI) += ide-acpi.o
18 18
19obj-$(CONFIG_IDE) += ide-core.o 19obj-$(CONFIG_IDE) += ide-core.o
20 20
21ifeq ($(CONFIG_IDE_ARM), y) 21obj-$(CONFIG_IDE_ARM) += ide_arm.o
22 ide-arm-core-y += arm/ide_arm.o 22
23 obj-y += ide-arm-core.o 23obj-$(CONFIG_BLK_DEV_ALI14XX) += ali14xx.o
24endif 24obj-$(CONFIG_BLK_DEV_UMC8672) += umc8672.o
25 25obj-$(CONFIG_BLK_DEV_DTC2278) += dtc2278.o
26obj-$(CONFIG_IDE) += legacy/ pci/ 26obj-$(CONFIG_BLK_DEV_HT6560B) += ht6560b.o
27obj-$(CONFIG_BLK_DEV_QD65XX) += qd65xx.o
28obj-$(CONFIG_BLK_DEV_4DRIVES) += ide-4drives.o
29
30obj-$(CONFIG_BLK_DEV_GAYLE) += gayle.o
31obj-$(CONFIG_BLK_DEV_FALCON_IDE) += falconide.o
32obj-$(CONFIG_BLK_DEV_MAC_IDE) += macide.o
33obj-$(CONFIG_BLK_DEV_Q40IDE) += q40ide.o
34obj-$(CONFIG_BLK_DEV_BUDDHA) += buddha.o
35
36obj-$(CONFIG_BLK_DEV_AEC62XX) += aec62xx.o
37obj-$(CONFIG_BLK_DEV_ALI15X3) += alim15x3.o
38obj-$(CONFIG_BLK_DEV_AMD74XX) += amd74xx.o
39obj-$(CONFIG_BLK_DEV_ATIIXP) += atiixp.o
40obj-$(CONFIG_BLK_DEV_CELLEB) += scc_pata.o
41obj-$(CONFIG_BLK_DEV_CMD64X) += cmd64x.o
42obj-$(CONFIG_BLK_DEV_CS5520) += cs5520.o
43obj-$(CONFIG_BLK_DEV_CS5530) += cs5530.o
44obj-$(CONFIG_BLK_DEV_CS5535) += cs5535.o
45obj-$(CONFIG_BLK_DEV_SC1200) += sc1200.o
46obj-$(CONFIG_BLK_DEV_CY82C693) += cy82c693.o
47obj-$(CONFIG_BLK_DEV_DELKIN) += delkin_cb.o
48obj-$(CONFIG_BLK_DEV_HPT366) += hpt366.o
49obj-$(CONFIG_BLK_DEV_IT8213) += it8213.o
50obj-$(CONFIG_BLK_DEV_IT821X) += it821x.o
51obj-$(CONFIG_BLK_DEV_JMICRON) += jmicron.o
52obj-$(CONFIG_BLK_DEV_NS87415) += ns87415.o
53obj-$(CONFIG_BLK_DEV_OPTI621) += opti621.o
54obj-$(CONFIG_BLK_DEV_PDC202XX_OLD) += pdc202xx_old.o
55obj-$(CONFIG_BLK_DEV_PDC202XX_NEW) += pdc202xx_new.o
56obj-$(CONFIG_BLK_DEV_PIIX) += piix.o
57obj-$(CONFIG_BLK_DEV_RZ1000) += rz1000.o
58obj-$(CONFIG_BLK_DEV_SVWKS) += serverworks.o
59obj-$(CONFIG_BLK_DEV_SGIIOC4) += sgiioc4.o
60obj-$(CONFIG_BLK_DEV_SIIMAGE) += siimage.o
61obj-$(CONFIG_BLK_DEV_SIS5513) += sis5513.o
62obj-$(CONFIG_BLK_DEV_SL82C105) += sl82c105.o
63obj-$(CONFIG_BLK_DEV_SLC90E66) += slc90e66.o
64obj-$(CONFIG_BLK_DEV_TC86C001) += tc86c001.o
65obj-$(CONFIG_BLK_DEV_TRIFLEX) += triflex.o
66obj-$(CONFIG_BLK_DEV_TRM290) += trm290.o
67obj-$(CONFIG_BLK_DEV_VIA82CXXX) += via82cxxx.o
68
69# Must appear at the end of the block
70obj-$(CONFIG_BLK_DEV_GENERIC) += ide-pci-generic.o
71ide-pci-generic-y += generic.o
27 72
28obj-$(CONFIG_IDEPCI_PCIBUS_ORDER) += ide-scan-pci.o 73obj-$(CONFIG_IDEPCI_PCIBUS_ORDER) += ide-scan-pci.o
29 74
30ifeq ($(CONFIG_BLK_DEV_CMD640), y) 75obj-$(CONFIG_BLK_DEV_CMD640) += cmd640.o
31 cmd640-core-y += pci/cmd640.o 76
32 obj-y += cmd640-core.o 77obj-$(CONFIG_BLK_DEV_IDE_PMAC) += pmac.o
33endif 78
79obj-$(CONFIG_IDE_H8300) += ide-h8300.o
34 80
35obj-$(CONFIG_IDE) += ppc/
36obj-$(CONFIG_IDE_H8300) += h8300/
37obj-$(CONFIG_IDE_GENERIC) += ide-generic.o 81obj-$(CONFIG_IDE_GENERIC) += ide-generic.o
38obj-$(CONFIG_BLK_DEV_IDEPNP) += ide-pnp.o 82obj-$(CONFIG_BLK_DEV_IDEPNP) += ide-pnp.o
39 83
@@ -58,14 +102,12 @@ obj-$(CONFIG_IDE_GD) += ide-gd_mod.o
58obj-$(CONFIG_BLK_DEV_IDECD) += ide-cd_mod.o 102obj-$(CONFIG_BLK_DEV_IDECD) += ide-cd_mod.o
59obj-$(CONFIG_BLK_DEV_IDETAPE) += ide-tape.o 103obj-$(CONFIG_BLK_DEV_IDETAPE) += ide-tape.o
60 104
61ifeq ($(CONFIG_BLK_DEV_IDECS), y) 105obj-$(CONFIG_BLK_DEV_IDECS) += ide-cs.o
62 ide-cs-core-y += legacy/ide-cs.o
63 obj-y += ide-cs-core.o
64endif
65 106
66ifeq ($(CONFIG_BLK_DEV_PLATFORM), y) 107obj-$(CONFIG_BLK_DEV_PLATFORM) += ide_platform.o
67 ide-platform-core-y += legacy/ide_platform.o 108
68 obj-y += ide-platform-core.o 109obj-$(CONFIG_BLK_DEV_IDE_ICSIDE) += icside.o
69endif 110obj-$(CONFIG_BLK_DEV_IDE_RAPIDE) += rapide.o
111obj-$(CONFIG_BLK_DEV_PALMCHIP_BK3710) += palm_bk3710.o
70 112
71obj-$(CONFIG_IDE) += arm/ mips/ 113obj-$(CONFIG_BLK_DEV_IDE_AU1XXX) += au1xxx-ide.o
diff --git a/drivers/ide/pci/aec62xx.c b/drivers/ide/aec62xx.c
index 4142c698e0d3..4142c698e0d3 100644
--- a/drivers/ide/pci/aec62xx.c
+++ b/drivers/ide/aec62xx.c
diff --git a/drivers/ide/legacy/ali14xx.c b/drivers/ide/ali14xx.c
index 90da1f953ed0..90da1f953ed0 100644
--- a/drivers/ide/legacy/ali14xx.c
+++ b/drivers/ide/ali14xx.c
diff --git a/drivers/ide/pci/alim15x3.c b/drivers/ide/alim15x3.c
index daf9dce39e52..daf9dce39e52 100644
--- a/drivers/ide/pci/alim15x3.c
+++ b/drivers/ide/alim15x3.c
diff --git a/drivers/ide/pci/amd74xx.c b/drivers/ide/amd74xx.c
index 81ec73134eda..81ec73134eda 100644
--- a/drivers/ide/pci/amd74xx.c
+++ b/drivers/ide/amd74xx.c
diff --git a/drivers/ide/arm/Makefile b/drivers/ide/arm/Makefile
deleted file mode 100644
index 5bc26053afa6..000000000000
--- a/drivers/ide/arm/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
1
2obj-$(CONFIG_BLK_DEV_IDE_ICSIDE) += icside.o
3obj-$(CONFIG_BLK_DEV_IDE_RAPIDE) += rapide.o
4obj-$(CONFIG_BLK_DEV_PALMCHIP_BK3710) += palm_bk3710.o
5
6ifeq ($(CONFIG_IDE_ARM), m)
7 obj-m += ide_arm.o
8endif
9
10EXTRA_CFLAGS := -Idrivers/ide
diff --git a/drivers/ide/pci/atiixp.c b/drivers/ide/atiixp.c
index b2735d28f5cc..b2735d28f5cc 100644
--- a/drivers/ide/pci/atiixp.c
+++ b/drivers/ide/atiixp.c
diff --git a/drivers/ide/mips/au1xxx-ide.c b/drivers/ide/au1xxx-ide.c
index 0ec8fd1e4dcb..0ec8fd1e4dcb 100644
--- a/drivers/ide/mips/au1xxx-ide.c
+++ b/drivers/ide/au1xxx-ide.c
diff --git a/drivers/ide/legacy/buddha.c b/drivers/ide/buddha.c
index c5a3c9ef6a5d..c5a3c9ef6a5d 100644
--- a/drivers/ide/legacy/buddha.c
+++ b/drivers/ide/buddha.c
diff --git a/drivers/ide/pci/cmd640.c b/drivers/ide/cmd640.c
index e4306647d00d..e4306647d00d 100644
--- a/drivers/ide/pci/cmd640.c
+++ b/drivers/ide/cmd640.c
diff --git a/drivers/ide/pci/cmd64x.c b/drivers/ide/cmd64x.c
index 935385c77e06..935385c77e06 100644
--- a/drivers/ide/pci/cmd64x.c
+++ b/drivers/ide/cmd64x.c
diff --git a/drivers/ide/pci/cs5520.c b/drivers/ide/cs5520.c
index 5efb467f8fa0..5efb467f8fa0 100644
--- a/drivers/ide/pci/cs5520.c
+++ b/drivers/ide/cs5520.c
diff --git a/drivers/ide/pci/cs5530.c b/drivers/ide/cs5530.c
index 53f079cc00af..53f079cc00af 100644
--- a/drivers/ide/pci/cs5530.c
+++ b/drivers/ide/cs5530.c
diff --git a/drivers/ide/pci/cs5535.c b/drivers/ide/cs5535.c
index 983d957a0189..983d957a0189 100644
--- a/drivers/ide/pci/cs5535.c
+++ b/drivers/ide/cs5535.c
diff --git a/drivers/ide/pci/cy82c693.c b/drivers/ide/cy82c693.c
index 5297f07d2933..5297f07d2933 100644
--- a/drivers/ide/pci/cy82c693.c
+++ b/drivers/ide/cy82c693.c
diff --git a/drivers/ide/pci/delkin_cb.c b/drivers/ide/delkin_cb.c
index 8f1b2d9f0513..8f1b2d9f0513 100644
--- a/drivers/ide/pci/delkin_cb.c
+++ b/drivers/ide/delkin_cb.c
diff --git a/drivers/ide/legacy/dtc2278.c b/drivers/ide/dtc2278.c
index 689b2e493413..689b2e493413 100644
--- a/drivers/ide/legacy/dtc2278.c
+++ b/drivers/ide/dtc2278.c
diff --git a/drivers/ide/legacy/falconide.c b/drivers/ide/falconide.c
index 39d500d84b07..39d500d84b07 100644
--- a/drivers/ide/legacy/falconide.c
+++ b/drivers/ide/falconide.c
diff --git a/drivers/ide/legacy/gayle.c b/drivers/ide/gayle.c
index 691506886561..691506886561 100644
--- a/drivers/ide/legacy/gayle.c
+++ b/drivers/ide/gayle.c
diff --git a/drivers/ide/pci/generic.c b/drivers/ide/generic.c
index 474f96a7c076..474f96a7c076 100644
--- a/drivers/ide/pci/generic.c
+++ b/drivers/ide/generic.c
diff --git a/drivers/ide/h8300/Makefile b/drivers/ide/h8300/Makefile
deleted file mode 100644
index 5eba16f423f4..000000000000
--- a/drivers/ide/h8300/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
1
2obj-$(CONFIG_IDE_H8300) += ide-h8300.o
diff --git a/drivers/ide/pci/hpt366.c b/drivers/ide/hpt366.c
index a7909e9c720e..a7909e9c720e 100644
--- a/drivers/ide/pci/hpt366.c
+++ b/drivers/ide/hpt366.c
diff --git a/drivers/ide/legacy/ht6560b.c b/drivers/ide/ht6560b.c
index c7e5c2246b79..c7e5c2246b79 100644
--- a/drivers/ide/legacy/ht6560b.c
+++ b/drivers/ide/ht6560b.c
diff --git a/drivers/ide/arm/icside.c b/drivers/ide/icside.c
index 76bdc9a27f6f..76bdc9a27f6f 100644
--- a/drivers/ide/arm/icside.c
+++ b/drivers/ide/icside.c
diff --git a/drivers/ide/legacy/ide-4drives.c b/drivers/ide/ide-4drives.c
index 9e85b1ec9607..9e85b1ec9607 100644
--- a/drivers/ide/legacy/ide-4drives.c
+++ b/drivers/ide/ide-4drives.c
diff --git a/drivers/ide/legacy/ide-cs.c b/drivers/ide/ide-cs.c
index cb199c815b53..cb199c815b53 100644
--- a/drivers/ide/legacy/ide-cs.c
+++ b/drivers/ide/ide-cs.c
diff --git a/drivers/ide/h8300/ide-h8300.c b/drivers/ide/ide-h8300.c
index e2cdd2e9cdec..e2cdd2e9cdec 100644
--- a/drivers/ide/h8300/ide-h8300.c
+++ b/drivers/ide/ide-h8300.c
diff --git a/drivers/ide/arm/ide_arm.c b/drivers/ide/ide_arm.c
index f728f2927b5a..f728f2927b5a 100644
--- a/drivers/ide/arm/ide_arm.c
+++ b/drivers/ide/ide_arm.c
diff --git a/drivers/ide/legacy/ide_platform.c b/drivers/ide/ide_platform.c
index 051b4ab0f359..051b4ab0f359 100644
--- a/drivers/ide/legacy/ide_platform.c
+++ b/drivers/ide/ide_platform.c
diff --git a/drivers/ide/pci/it8213.c b/drivers/ide/it8213.c
index 7c2feeb3c5ec..7c2feeb3c5ec 100644
--- a/drivers/ide/pci/it8213.c
+++ b/drivers/ide/it8213.c
diff --git a/drivers/ide/pci/it821x.c b/drivers/ide/it821x.c
index 995e18bb3139..995e18bb3139 100644
--- a/drivers/ide/pci/it821x.c
+++ b/drivers/ide/it821x.c
diff --git a/drivers/ide/pci/jmicron.c b/drivers/ide/jmicron.c
index 9a68433cf46d..9a68433cf46d 100644
--- a/drivers/ide/pci/jmicron.c
+++ b/drivers/ide/jmicron.c
diff --git a/drivers/ide/legacy/Makefile b/drivers/ide/legacy/Makefile
deleted file mode 100644
index 6939329f89e8..000000000000
--- a/drivers/ide/legacy/Makefile
+++ /dev/null
@@ -1,25 +0,0 @@
1
2# link order is important here
3
4obj-$(CONFIG_BLK_DEV_ALI14XX) += ali14xx.o
5obj-$(CONFIG_BLK_DEV_UMC8672) += umc8672.o
6obj-$(CONFIG_BLK_DEV_DTC2278) += dtc2278.o
7obj-$(CONFIG_BLK_DEV_HT6560B) += ht6560b.o
8obj-$(CONFIG_BLK_DEV_QD65XX) += qd65xx.o
9obj-$(CONFIG_BLK_DEV_4DRIVES) += ide-4drives.o
10
11obj-$(CONFIG_BLK_DEV_GAYLE) += gayle.o
12obj-$(CONFIG_BLK_DEV_FALCON_IDE) += falconide.o
13obj-$(CONFIG_BLK_DEV_MAC_IDE) += macide.o
14obj-$(CONFIG_BLK_DEV_Q40IDE) += q40ide.o
15obj-$(CONFIG_BLK_DEV_BUDDHA) += buddha.o
16
17ifeq ($(CONFIG_BLK_DEV_IDECS), m)
18 obj-m += ide-cs.o
19endif
20
21ifeq ($(CONFIG_BLK_DEV_PLATFORM), m)
22 obj-m += ide_platform.o
23endif
24
25EXTRA_CFLAGS := -Idrivers/ide
diff --git a/drivers/ide/legacy/macide.c b/drivers/ide/macide.c
index 43f97cc1d30e..43f97cc1d30e 100644
--- a/drivers/ide/legacy/macide.c
+++ b/drivers/ide/macide.c
diff --git a/drivers/ide/mips/Makefile b/drivers/ide/mips/Makefile
deleted file mode 100644
index 5873fa0b8769..000000000000
--- a/drivers/ide/mips/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
1obj-$(CONFIG_BLK_DEV_IDE_AU1XXX) += au1xxx-ide.o
2
3EXTRA_CFLAGS := -Idrivers/ide
diff --git a/drivers/ide/pci/ns87415.c b/drivers/ide/ns87415.c
index 13789060f407..13789060f407 100644
--- a/drivers/ide/pci/ns87415.c
+++ b/drivers/ide/ns87415.c
diff --git a/drivers/ide/pci/opti621.c b/drivers/ide/opti621.c
index 6048eda3cd61..6048eda3cd61 100644
--- a/drivers/ide/pci/opti621.c
+++ b/drivers/ide/opti621.c
diff --git a/drivers/ide/arm/palm_bk3710.c b/drivers/ide/palm_bk3710.c
index 122ed3c072fd..122ed3c072fd 100644
--- a/drivers/ide/arm/palm_bk3710.c
+++ b/drivers/ide/palm_bk3710.c
diff --git a/drivers/ide/pci/Makefile b/drivers/ide/pci/Makefile
deleted file mode 100644
index ab44a1f5f5a9..000000000000
--- a/drivers/ide/pci/Makefile
+++ /dev/null
@@ -1,43 +0,0 @@
1
2obj-$(CONFIG_BLK_DEV_AEC62XX) += aec62xx.o
3obj-$(CONFIG_BLK_DEV_ALI15X3) += alim15x3.o
4obj-$(CONFIG_BLK_DEV_AMD74XX) += amd74xx.o
5obj-$(CONFIG_BLK_DEV_ATIIXP) += atiixp.o
6obj-$(CONFIG_BLK_DEV_CELLEB) += scc_pata.o
7obj-$(CONFIG_BLK_DEV_CMD64X) += cmd64x.o
8obj-$(CONFIG_BLK_DEV_CS5520) += cs5520.o
9obj-$(CONFIG_BLK_DEV_CS5530) += cs5530.o
10obj-$(CONFIG_BLK_DEV_CS5535) += cs5535.o
11obj-$(CONFIG_BLK_DEV_SC1200) += sc1200.o
12obj-$(CONFIG_BLK_DEV_CY82C693) += cy82c693.o
13obj-$(CONFIG_BLK_DEV_DELKIN) += delkin_cb.o
14obj-$(CONFIG_BLK_DEV_HPT366) += hpt366.o
15obj-$(CONFIG_BLK_DEV_IT8213) += it8213.o
16obj-$(CONFIG_BLK_DEV_IT821X) += it821x.o
17obj-$(CONFIG_BLK_DEV_JMICRON) += jmicron.o
18obj-$(CONFIG_BLK_DEV_NS87415) += ns87415.o
19obj-$(CONFIG_BLK_DEV_OPTI621) += opti621.o
20obj-$(CONFIG_BLK_DEV_PDC202XX_OLD) += pdc202xx_old.o
21obj-$(CONFIG_BLK_DEV_PDC202XX_NEW) += pdc202xx_new.o
22obj-$(CONFIG_BLK_DEV_PIIX) += piix.o
23obj-$(CONFIG_BLK_DEV_RZ1000) += rz1000.o
24obj-$(CONFIG_BLK_DEV_SVWKS) += serverworks.o
25obj-$(CONFIG_BLK_DEV_SGIIOC4) += sgiioc4.o
26obj-$(CONFIG_BLK_DEV_SIIMAGE) += siimage.o
27obj-$(CONFIG_BLK_DEV_SIS5513) += sis5513.o
28obj-$(CONFIG_BLK_DEV_SL82C105) += sl82c105.o
29obj-$(CONFIG_BLK_DEV_SLC90E66) += slc90e66.o
30obj-$(CONFIG_BLK_DEV_TC86C001) += tc86c001.o
31obj-$(CONFIG_BLK_DEV_TRIFLEX) += triflex.o
32obj-$(CONFIG_BLK_DEV_TRM290) += trm290.o
33obj-$(CONFIG_BLK_DEV_VIA82CXXX) += via82cxxx.o
34
35# Must appear at the end of the block
36obj-$(CONFIG_BLK_DEV_GENERIC) += ide-pci-generic.o
37ide-pci-generic-y += generic.o
38
39ifeq ($(CONFIG_BLK_DEV_CMD640), m)
40 obj-m += cmd640.o
41endif
42
43EXTRA_CFLAGS := -Idrivers/ide
diff --git a/drivers/ide/pci/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
index 211ae46e3e0c..211ae46e3e0c 100644
--- a/drivers/ide/pci/pdc202xx_new.c
+++ b/drivers/ide/pdc202xx_new.c
diff --git a/drivers/ide/pci/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
index 799557c25eef..799557c25eef 100644
--- a/drivers/ide/pci/pdc202xx_old.c
+++ b/drivers/ide/pdc202xx_old.c
diff --git a/drivers/ide/pci/piix.c b/drivers/ide/piix.c
index d63f9fdca76b..d63f9fdca76b 100644
--- a/drivers/ide/pci/piix.c
+++ b/drivers/ide/piix.c
diff --git a/drivers/ide/ppc/pmac.c b/drivers/ide/pmac.c
index 2e19d6298536..2e19d6298536 100644
--- a/drivers/ide/ppc/pmac.c
+++ b/drivers/ide/pmac.c
diff --git a/drivers/ide/ppc/Makefile b/drivers/ide/ppc/Makefile
deleted file mode 100644
index 74e52adcdf4b..000000000000
--- a/drivers/ide/ppc/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
1
2obj-$(CONFIG_BLK_DEV_IDE_PMAC) += pmac.o
diff --git a/drivers/ide/legacy/q40ide.c b/drivers/ide/q40ide.c
index 4af4a8ce4cdf..4af4a8ce4cdf 100644
--- a/drivers/ide/legacy/q40ide.c
+++ b/drivers/ide/q40ide.c
diff --git a/drivers/ide/legacy/qd65xx.c b/drivers/ide/qd65xx.c
index bc27c7aba936..bc27c7aba936 100644
--- a/drivers/ide/legacy/qd65xx.c
+++ b/drivers/ide/qd65xx.c
diff --git a/drivers/ide/legacy/qd65xx.h b/drivers/ide/qd65xx.h
index c83dea85e621..c83dea85e621 100644
--- a/drivers/ide/legacy/qd65xx.h
+++ b/drivers/ide/qd65xx.h
diff --git a/drivers/ide/arm/rapide.c b/drivers/ide/rapide.c
index 78d27d9ae430..78d27d9ae430 100644
--- a/drivers/ide/arm/rapide.c
+++ b/drivers/ide/rapide.c
diff --git a/drivers/ide/pci/rz1000.c b/drivers/ide/rz1000.c
index 7daf0135cbac..7daf0135cbac 100644
--- a/drivers/ide/pci/rz1000.c
+++ b/drivers/ide/rz1000.c
diff --git a/drivers/ide/pci/sc1200.c b/drivers/ide/sc1200.c
index f1a8758e3a99..f1a8758e3a99 100644
--- a/drivers/ide/pci/sc1200.c
+++ b/drivers/ide/sc1200.c
diff --git a/drivers/ide/pci/scc_pata.c b/drivers/ide/scc_pata.c
index 49f163aa51e3..49f163aa51e3 100644
--- a/drivers/ide/pci/scc_pata.c
+++ b/drivers/ide/scc_pata.c
diff --git a/drivers/ide/pci/serverworks.c b/drivers/ide/serverworks.c
index 437bc919dafd..437bc919dafd 100644
--- a/drivers/ide/pci/serverworks.c
+++ b/drivers/ide/serverworks.c
diff --git a/drivers/ide/pci/sgiioc4.c b/drivers/ide/sgiioc4.c
index 8af9b23499fd..8af9b23499fd 100644
--- a/drivers/ide/pci/sgiioc4.c
+++ b/drivers/ide/sgiioc4.c
diff --git a/drivers/ide/pci/siimage.c b/drivers/ide/siimage.c
index eb4faf92c571..eb4faf92c571 100644
--- a/drivers/ide/pci/siimage.c
+++ b/drivers/ide/siimage.c
diff --git a/drivers/ide/pci/sis5513.c b/drivers/ide/sis5513.c
index ad32e18c5ba3..ad32e18c5ba3 100644
--- a/drivers/ide/pci/sis5513.c
+++ b/drivers/ide/sis5513.c
diff --git a/drivers/ide/pci/sl82c105.c b/drivers/ide/sl82c105.c
index 84dc33602ff8..84dc33602ff8 100644
--- a/drivers/ide/pci/sl82c105.c
+++ b/drivers/ide/sl82c105.c
diff --git a/drivers/ide/pci/slc90e66.c b/drivers/ide/slc90e66.c
index 0f759e4ed779..0f759e4ed779 100644
--- a/drivers/ide/pci/slc90e66.c
+++ b/drivers/ide/slc90e66.c
diff --git a/drivers/ide/pci/tc86c001.c b/drivers/ide/tc86c001.c
index 93e2cce4b296..93e2cce4b296 100644
--- a/drivers/ide/pci/tc86c001.c
+++ b/drivers/ide/tc86c001.c
diff --git a/drivers/ide/pci/triflex.c b/drivers/ide/triflex.c
index b6ff40336aa9..b6ff40336aa9 100644
--- a/drivers/ide/pci/triflex.c
+++ b/drivers/ide/triflex.c
diff --git a/drivers/ide/pci/trm290.c b/drivers/ide/trm290.c
index 75ea61526566..75ea61526566 100644
--- a/drivers/ide/pci/trm290.c
+++ b/drivers/ide/trm290.c
diff --git a/drivers/ide/legacy/umc8672.c b/drivers/ide/umc8672.c
index 1da076e0c917..1da076e0c917 100644
--- a/drivers/ide/legacy/umc8672.c
+++ b/drivers/ide/umc8672.c
diff --git a/drivers/ide/pci/via82cxxx.c b/drivers/ide/via82cxxx.c
index 2a812d3207e9..2a812d3207e9 100644
--- a/drivers/ide/pci/via82cxxx.c
+++ b/drivers/ide/via82cxxx.c
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 49c45feccd5b..5c54fc2350be 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -406,19 +406,15 @@ static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
406 406
407 if (i == qp_info->snoop_table_size) { 407 if (i == qp_info->snoop_table_size) {
408 /* Grow table. */ 408 /* Grow table. */
409 new_snoop_table = kmalloc(sizeof mad_snoop_priv * 409 new_snoop_table = krealloc(qp_info->snoop_table,
410 qp_info->snoop_table_size + 1, 410 sizeof mad_snoop_priv *
411 GFP_ATOMIC); 411 (qp_info->snoop_table_size + 1),
412 GFP_ATOMIC);
412 if (!new_snoop_table) { 413 if (!new_snoop_table) {
413 i = -ENOMEM; 414 i = -ENOMEM;
414 goto out; 415 goto out;
415 } 416 }
416 if (qp_info->snoop_table) { 417
417 memcpy(new_snoop_table, qp_info->snoop_table,
418 sizeof mad_snoop_priv *
419 qp_info->snoop_table_size);
420 kfree(qp_info->snoop_table);
421 }
422 qp_info->snoop_table = new_snoop_table; 418 qp_info->snoop_table = new_snoop_table;
423 qp_info->snoop_table_size++; 419 qp_info->snoop_table_size++;
424 } 420 }
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 3ddacf39b7ba..4346a24568fb 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -904,8 +904,8 @@ static ssize_t ucma_join_multicast(struct ucma_file *file,
904 904
905 mutex_lock(&file->mut); 905 mutex_lock(&file->mut);
906 mc = ucma_alloc_multicast(ctx); 906 mc = ucma_alloc_multicast(ctx);
907 if (IS_ERR(mc)) { 907 if (!mc) {
908 ret = PTR_ERR(mc); 908 ret = -ENOMEM;
909 goto err1; 909 goto err1;
910 } 910 }
911 911
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index c325c44807e8..44e936e48a31 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -1942,6 +1942,7 @@ fail4:
1942fail3: 1942fail3:
1943 cxgb3_free_atid(ep->com.tdev, ep->atid); 1943 cxgb3_free_atid(ep->com.tdev, ep->atid);
1944fail2: 1944fail2:
1945 cm_id->rem_ref(cm_id);
1945 put_ep(&ep->com); 1946 put_ep(&ep->com);
1946out: 1947out:
1947 return err; 1948 return err;
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index 5d7b7855afb9..4df887af66a5 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -128,6 +128,8 @@ struct ehca_shca {
128 /* MR pgsize: bit 0-3 means 4K, 64K, 1M, 16M respectively */ 128 /* MR pgsize: bit 0-3 means 4K, 64K, 1M, 16M respectively */
129 u32 hca_cap_mr_pgsize; 129 u32 hca_cap_mr_pgsize;
130 int max_mtu; 130 int max_mtu;
131 int max_num_qps;
132 int max_num_cqs;
131 atomic_t num_cqs; 133 atomic_t num_cqs;
132 atomic_t num_qps; 134 atomic_t num_qps;
133}; 135};
diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c
index 33647a95eb9a..2f4c28a30271 100644
--- a/drivers/infiniband/hw/ehca/ehca_cq.c
+++ b/drivers/infiniband/hw/ehca/ehca_cq.c
@@ -132,9 +132,9 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
132 if (cqe >= 0xFFFFFFFF - 64 - additional_cqe) 132 if (cqe >= 0xFFFFFFFF - 64 - additional_cqe)
133 return ERR_PTR(-EINVAL); 133 return ERR_PTR(-EINVAL);
134 134
135 if (!atomic_add_unless(&shca->num_cqs, 1, ehca_max_cq)) { 135 if (!atomic_add_unless(&shca->num_cqs, 1, shca->max_num_cqs)) {
136 ehca_err(device, "Unable to create CQ, max number of %i " 136 ehca_err(device, "Unable to create CQ, max number of %i "
137 "CQs reached.", ehca_max_cq); 137 "CQs reached.", shca->max_num_cqs);
138 ehca_err(device, "To increase the maximum number of CQs " 138 ehca_err(device, "To increase the maximum number of CQs "
139 "use the number_of_cqs module parameter.\n"); 139 "use the number_of_cqs module parameter.\n");
140 return ERR_PTR(-ENOSPC); 140 return ERR_PTR(-ENOSPC);
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index 598844d2edc9..bb02a86aa526 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -44,6 +44,8 @@
44#include <linux/slab.h> 44#include <linux/slab.h>
45#endif 45#endif
46 46
47#include <linux/notifier.h>
48#include <linux/memory.h>
47#include "ehca_classes.h" 49#include "ehca_classes.h"
48#include "ehca_iverbs.h" 50#include "ehca_iverbs.h"
49#include "ehca_mrmw.h" 51#include "ehca_mrmw.h"
@@ -366,22 +368,23 @@ static int ehca_sense_attributes(struct ehca_shca *shca)
366 shca->hca_cap_mr_pgsize |= pgsize_map[i + 1]; 368 shca->hca_cap_mr_pgsize |= pgsize_map[i + 1];
367 369
368 /* Set maximum number of CQs and QPs to calculate EQ size */ 370 /* Set maximum number of CQs and QPs to calculate EQ size */
369 if (ehca_max_qp == -1) 371 if (shca->max_num_qps == -1)
370 ehca_max_qp = min_t(int, rblock->max_qp, EHCA_MAX_NUM_QUEUES); 372 shca->max_num_qps = min_t(int, rblock->max_qp,
371 else if (ehca_max_qp < 1 || ehca_max_qp > rblock->max_qp) { 373 EHCA_MAX_NUM_QUEUES);
372 ehca_gen_err("Requested number of QPs is out of range (1 - %i) " 374 else if (shca->max_num_qps < 1 || shca->max_num_qps > rblock->max_qp) {
373 "specified by HW", rblock->max_qp); 375 ehca_gen_warn("The requested number of QPs is out of range "
374 ret = -EINVAL; 376 "(1 - %i) specified by HW. Value is set to %i",
375 goto sense_attributes1; 377 rblock->max_qp, rblock->max_qp);
378 shca->max_num_qps = rblock->max_qp;
376 } 379 }
377 380
378 if (ehca_max_cq == -1) 381 if (shca->max_num_cqs == -1)
379 ehca_max_cq = min_t(int, rblock->max_cq, EHCA_MAX_NUM_QUEUES); 382 shca->max_num_cqs = min_t(int, rblock->max_cq,
380 else if (ehca_max_cq < 1 || ehca_max_cq > rblock->max_cq) { 383 EHCA_MAX_NUM_QUEUES);
381 ehca_gen_err("Requested number of CQs is out of range (1 - %i) " 384 else if (shca->max_num_cqs < 1 || shca->max_num_cqs > rblock->max_cq) {
382 "specified by HW", rblock->max_cq); 385 ehca_gen_warn("The requested number of CQs is out of range "
383 ret = -EINVAL; 386 "(1 - %i) specified by HW. Value is set to %i",
384 goto sense_attributes1; 387 rblock->max_cq, rblock->max_cq);
385 } 388 }
386 389
387 /* query max MTU from first port -- it's the same for all ports */ 390 /* query max MTU from first port -- it's the same for all ports */
@@ -733,9 +736,13 @@ static int __devinit ehca_probe(struct of_device *dev,
733 ehca_gen_err("Cannot allocate shca memory."); 736 ehca_gen_err("Cannot allocate shca memory.");
734 return -ENOMEM; 737 return -ENOMEM;
735 } 738 }
739
736 mutex_init(&shca->modify_mutex); 740 mutex_init(&shca->modify_mutex);
737 atomic_set(&shca->num_cqs, 0); 741 atomic_set(&shca->num_cqs, 0);
738 atomic_set(&shca->num_qps, 0); 742 atomic_set(&shca->num_qps, 0);
743 shca->max_num_qps = ehca_max_qp;
744 shca->max_num_cqs = ehca_max_cq;
745
739 for (i = 0; i < ARRAY_SIZE(shca->sport); i++) 746 for (i = 0; i < ARRAY_SIZE(shca->sport); i++)
740 spin_lock_init(&shca->sport[i].mod_sqp_lock); 747 spin_lock_init(&shca->sport[i].mod_sqp_lock);
741 748
@@ -755,7 +762,7 @@ static int __devinit ehca_probe(struct of_device *dev,
755 goto probe1; 762 goto probe1;
756 } 763 }
757 764
758 eq_size = 2 * ehca_max_cq + 4 * ehca_max_qp; 765 eq_size = 2 * shca->max_num_cqs + 4 * shca->max_num_qps;
759 /* create event queues */ 766 /* create event queues */
760 ret = ehca_create_eq(shca, &shca->eq, EHCA_EQ, eq_size); 767 ret = ehca_create_eq(shca, &shca->eq, EHCA_EQ, eq_size);
761 if (ret) { 768 if (ret) {
@@ -964,6 +971,41 @@ void ehca_poll_eqs(unsigned long data)
964 spin_unlock(&shca_list_lock); 971 spin_unlock(&shca_list_lock);
965} 972}
966 973
974static int ehca_mem_notifier(struct notifier_block *nb,
975 unsigned long action, void *data)
976{
977 static unsigned long ehca_dmem_warn_time;
978
979 switch (action) {
980 case MEM_CANCEL_OFFLINE:
981 case MEM_CANCEL_ONLINE:
982 case MEM_ONLINE:
983 case MEM_OFFLINE:
984 return NOTIFY_OK;
985 case MEM_GOING_ONLINE:
986 case MEM_GOING_OFFLINE:
987 /* only ok if no hca is attached to the lpar */
988 spin_lock(&shca_list_lock);
989 if (list_empty(&shca_list)) {
990 spin_unlock(&shca_list_lock);
991 return NOTIFY_OK;
992 } else {
993 spin_unlock(&shca_list_lock);
994 if (printk_timed_ratelimit(&ehca_dmem_warn_time,
995 30 * 1000))
996 ehca_gen_err("DMEM operations are not allowed"
997 "as long as an ehca adapter is"
998 "attached to the LPAR");
999 return NOTIFY_BAD;
1000 }
1001 }
1002 return NOTIFY_OK;
1003}
1004
1005static struct notifier_block ehca_mem_nb = {
1006 .notifier_call = ehca_mem_notifier,
1007};
1008
967static int __init ehca_module_init(void) 1009static int __init ehca_module_init(void)
968{ 1010{
969 int ret; 1011 int ret;
@@ -991,6 +1033,12 @@ static int __init ehca_module_init(void)
991 goto module_init2; 1033 goto module_init2;
992 } 1034 }
993 1035
1036 ret = register_memory_notifier(&ehca_mem_nb);
1037 if (ret) {
1038 ehca_gen_err("Failed registering memory add/remove notifier");
1039 goto module_init3;
1040 }
1041
994 if (ehca_poll_all_eqs != 1) { 1042 if (ehca_poll_all_eqs != 1) {
995 ehca_gen_err("WARNING!!!"); 1043 ehca_gen_err("WARNING!!!");
996 ehca_gen_err("It is possible to lose interrupts."); 1044 ehca_gen_err("It is possible to lose interrupts.");
@@ -1003,6 +1051,9 @@ static int __init ehca_module_init(void)
1003 1051
1004 return 0; 1052 return 0;
1005 1053
1054module_init3:
1055 ibmebus_unregister_driver(&ehca_driver);
1056
1006module_init2: 1057module_init2:
1007 ehca_destroy_slab_caches(); 1058 ehca_destroy_slab_caches();
1008 1059
@@ -1018,6 +1069,8 @@ static void __exit ehca_module_exit(void)
1018 1069
1019 ibmebus_unregister_driver(&ehca_driver); 1070 ibmebus_unregister_driver(&ehca_driver);
1020 1071
1072 unregister_memory_notifier(&ehca_mem_nb);
1073
1021 ehca_destroy_slab_caches(); 1074 ehca_destroy_slab_caches();
1022 1075
1023 ehca_destroy_comp_pool(); 1076 ehca_destroy_comp_pool();
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 4dbe2870e014..4d54b9f64567 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -465,9 +465,9 @@ static struct ehca_qp *internal_create_qp(
465 u32 swqe_size = 0, rwqe_size = 0, ib_qp_num; 465 u32 swqe_size = 0, rwqe_size = 0, ib_qp_num;
466 unsigned long flags; 466 unsigned long flags;
467 467
468 if (!atomic_add_unless(&shca->num_qps, 1, ehca_max_qp)) { 468 if (!atomic_add_unless(&shca->num_qps, 1, shca->max_num_qps)) {
469 ehca_err(pd->device, "Unable to create QP, max number of %i " 469 ehca_err(pd->device, "Unable to create QP, max number of %i "
470 "QPs reached.", ehca_max_qp); 470 "QPs reached.", shca->max_num_qps);
471 ehca_err(pd->device, "To increase the maximum number of QPs " 471 ehca_err(pd->device, "To increase the maximum number of QPs "
472 "use the number_of_qps module parameter.\n"); 472 "use the number_of_qps module parameter.\n");
473 return ERR_PTR(-ENOSPC); 473 return ERR_PTR(-ENOSPC);
@@ -502,6 +502,12 @@ static struct ehca_qp *internal_create_qp(
502 if (init_attr->srq) { 502 if (init_attr->srq) {
503 my_srq = container_of(init_attr->srq, struct ehca_qp, ib_srq); 503 my_srq = container_of(init_attr->srq, struct ehca_qp, ib_srq);
504 504
505 if (qp_type == IB_QPT_UC) {
506 ehca_err(pd->device, "UC with SRQ not supported");
507 atomic_dec(&shca->num_qps);
508 return ERR_PTR(-EINVAL);
509 }
510
505 has_srq = 1; 511 has_srq = 1;
506 parms.ext_type = EQPT_SRQBASE; 512 parms.ext_type = EQPT_SRQBASE;
507 parms.srq_qpn = my_srq->real_qp_num; 513 parms.srq_qpn = my_srq->real_qp_num;
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index cdca3a511e1c..606f1e2ef284 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -298,7 +298,7 @@ int mlx4_ib_mad_init(struct mlx4_ib_dev *dev)
298 int p, q; 298 int p, q;
299 int ret; 299 int ret;
300 300
301 for (p = 0; p < dev->dev->caps.num_ports; ++p) 301 for (p = 0; p < dev->num_ports; ++p)
302 for (q = 0; q <= 1; ++q) { 302 for (q = 0; q <= 1; ++q) {
303 agent = ib_register_mad_agent(&dev->ib_dev, p + 1, 303 agent = ib_register_mad_agent(&dev->ib_dev, p + 1,
304 q ? IB_QPT_GSI : IB_QPT_SMI, 304 q ? IB_QPT_GSI : IB_QPT_SMI,
@@ -314,7 +314,7 @@ int mlx4_ib_mad_init(struct mlx4_ib_dev *dev)
314 return 0; 314 return 0;
315 315
316err: 316err:
317 for (p = 0; p < dev->dev->caps.num_ports; ++p) 317 for (p = 0; p < dev->num_ports; ++p)
318 for (q = 0; q <= 1; ++q) 318 for (q = 0; q <= 1; ++q)
319 if (dev->send_agent[p][q]) 319 if (dev->send_agent[p][q])
320 ib_unregister_mad_agent(dev->send_agent[p][q]); 320 ib_unregister_mad_agent(dev->send_agent[p][q]);
@@ -327,7 +327,7 @@ void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev)
327 struct ib_mad_agent *agent; 327 struct ib_mad_agent *agent;
328 int p, q; 328 int p, q;
329 329
330 for (p = 0; p < dev->dev->caps.num_ports; ++p) { 330 for (p = 0; p < dev->num_ports; ++p) {
331 for (q = 0; q <= 1; ++q) { 331 for (q = 0; q <= 1; ++q) {
332 agent = dev->send_agent[p][q]; 332 agent = dev->send_agent[p][q];
333 dev->send_agent[p][q] = NULL; 333 dev->send_agent[p][q] = NULL;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index a3c2851c0545..2e80f8f47b02 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -574,7 +574,10 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
574 ibdev->ib_dev.owner = THIS_MODULE; 574 ibdev->ib_dev.owner = THIS_MODULE;
575 ibdev->ib_dev.node_type = RDMA_NODE_IB_CA; 575 ibdev->ib_dev.node_type = RDMA_NODE_IB_CA;
576 ibdev->ib_dev.local_dma_lkey = dev->caps.reserved_lkey; 576 ibdev->ib_dev.local_dma_lkey = dev->caps.reserved_lkey;
577 ibdev->ib_dev.phys_port_cnt = dev->caps.num_ports; 577 ibdev->num_ports = 0;
578 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
579 ibdev->num_ports++;
580 ibdev->ib_dev.phys_port_cnt = ibdev->num_ports;
578 ibdev->ib_dev.num_comp_vectors = 1; 581 ibdev->ib_dev.num_comp_vectors = 1;
579 ibdev->ib_dev.dma_device = &dev->pdev->dev; 582 ibdev->ib_dev.dma_device = &dev->pdev->dev;
580 583
@@ -691,7 +694,7 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
691 struct mlx4_ib_dev *ibdev = ibdev_ptr; 694 struct mlx4_ib_dev *ibdev = ibdev_ptr;
692 int p; 695 int p;
693 696
694 for (p = 1; p <= dev->caps.num_ports; ++p) 697 for (p = 1; p <= ibdev->num_ports; ++p)
695 mlx4_CLOSE_PORT(dev, p); 698 mlx4_CLOSE_PORT(dev, p);
696 699
697 mlx4_ib_mad_cleanup(ibdev); 700 mlx4_ib_mad_cleanup(ibdev);
@@ -706,6 +709,10 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
706 enum mlx4_dev_event event, int port) 709 enum mlx4_dev_event event, int port)
707{ 710{
708 struct ib_event ibev; 711 struct ib_event ibev;
712 struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr);
713
714 if (port > ibdev->num_ports)
715 return;
709 716
710 switch (event) { 717 switch (event) {
711 case MLX4_DEV_EVENT_PORT_UP: 718 case MLX4_DEV_EVENT_PORT_UP:
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 6e2b0dc21b61..9974e886b8de 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -162,6 +162,7 @@ struct mlx4_ib_ah {
162struct mlx4_ib_dev { 162struct mlx4_ib_dev {
163 struct ib_device ib_dev; 163 struct ib_device ib_dev;
164 struct mlx4_dev *dev; 164 struct mlx4_dev *dev;
165 int num_ports;
165 void __iomem *uar_map; 166 void __iomem *uar_map;
166 167
167 struct mlx4_uar priv_uar; 168 struct mlx4_uar priv_uar;
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index baa01deb2436..39167a797f99 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -451,6 +451,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
451 struct ib_qp_init_attr *init_attr, 451 struct ib_qp_init_attr *init_attr,
452 struct ib_udata *udata, int sqpn, struct mlx4_ib_qp *qp) 452 struct ib_udata *udata, int sqpn, struct mlx4_ib_qp *qp)
453{ 453{
454 int qpn;
454 int err; 455 int err;
455 456
456 mutex_init(&qp->mutex); 457 mutex_init(&qp->mutex);
@@ -545,9 +546,17 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
545 } 546 }
546 } 547 }
547 548
548 err = mlx4_qp_alloc(dev->dev, sqpn, &qp->mqp); 549 if (sqpn) {
550 qpn = sqpn;
551 } else {
552 err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn);
553 if (err)
554 goto err_wrid;
555 }
556
557 err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp);
549 if (err) 558 if (err)
550 goto err_wrid; 559 goto err_qpn;
551 560
552 /* 561 /*
553 * Hardware wants QPN written in big-endian order (after 562 * Hardware wants QPN written in big-endian order (after
@@ -560,6 +569,10 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
560 569
561 return 0; 570 return 0;
562 571
572err_qpn:
573 if (!sqpn)
574 mlx4_qp_release_range(dev->dev, qpn, 1);
575
563err_wrid: 576err_wrid:
564 if (pd->uobject) { 577 if (pd->uobject) {
565 if (!init_attr->srq) 578 if (!init_attr->srq)
@@ -655,6 +668,10 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
655 mlx4_ib_unlock_cqs(send_cq, recv_cq); 668 mlx4_ib_unlock_cqs(send_cq, recv_cq);
656 669
657 mlx4_qp_free(dev->dev, &qp->mqp); 670 mlx4_qp_free(dev->dev, &qp->mqp);
671
672 if (!is_sqp(dev, qp))
673 mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1);
674
658 mlx4_mtt_cleanup(dev->dev, &qp->mtt); 675 mlx4_mtt_cleanup(dev->dev, &qp->mtt);
659 676
660 if (is_user) { 677 if (is_user) {
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 68ba5c3482e4..e0c7dfabf2b4 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -507,6 +507,7 @@ int ipoib_pkey_dev_delay_open(struct net_device *dev);
507void ipoib_drain_cq(struct net_device *dev); 507void ipoib_drain_cq(struct net_device *dev);
508 508
509void ipoib_set_ethtool_ops(struct net_device *dev); 509void ipoib_set_ethtool_ops(struct net_device *dev);
510int ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca);
510 511
511#ifdef CONFIG_INFINIBAND_IPOIB_CM 512#ifdef CONFIG_INFINIBAND_IPOIB_CM
512 513
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
index 66af5c1a76e5..e9795f60e5d6 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -42,6 +42,13 @@ static void ipoib_get_drvinfo(struct net_device *netdev,
42 strncpy(drvinfo->driver, "ipoib", sizeof(drvinfo->driver) - 1); 42 strncpy(drvinfo->driver, "ipoib", sizeof(drvinfo->driver) - 1);
43} 43}
44 44
45static u32 ipoib_get_rx_csum(struct net_device *dev)
46{
47 struct ipoib_dev_priv *priv = netdev_priv(dev);
48 return test_bit(IPOIB_FLAG_CSUM, &priv->flags) &&
49 !test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
50}
51
45static int ipoib_get_coalesce(struct net_device *dev, 52static int ipoib_get_coalesce(struct net_device *dev,
46 struct ethtool_coalesce *coal) 53 struct ethtool_coalesce *coal)
47{ 54{
@@ -129,7 +136,7 @@ static void ipoib_get_ethtool_stats(struct net_device *dev,
129 136
130static const struct ethtool_ops ipoib_ethtool_ops = { 137static const struct ethtool_ops ipoib_ethtool_ops = {
131 .get_drvinfo = ipoib_get_drvinfo, 138 .get_drvinfo = ipoib_get_drvinfo,
132 .get_tso = ethtool_op_get_tso, 139 .get_rx_csum = ipoib_get_rx_csum,
133 .get_coalesce = ipoib_get_coalesce, 140 .get_coalesce = ipoib_get_coalesce,
134 .set_coalesce = ipoib_set_coalesce, 141 .set_coalesce = ipoib_set_coalesce,
135 .get_flags = ethtool_op_get_flags, 142 .get_flags = ethtool_op_get_flags,
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 0e748aeeae99..28eb6f03c588 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -685,10 +685,6 @@ int ipoib_ib_dev_open(struct net_device *dev)
685 queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task, 685 queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
686 round_jiffies_relative(HZ)); 686 round_jiffies_relative(HZ));
687 687
688 init_timer(&priv->poll_timer);
689 priv->poll_timer.function = ipoib_ib_tx_timer_func;
690 priv->poll_timer.data = (unsigned long)dev;
691
692 set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags); 688 set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
693 689
694 return 0; 690 return 0;
@@ -906,6 +902,9 @@ int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
906 return -ENODEV; 902 return -ENODEV;
907 } 903 }
908 904
905 setup_timer(&priv->poll_timer, ipoib_ib_tx_timer_func,
906 (unsigned long) dev);
907
909 if (dev->flags & IFF_UP) { 908 if (dev->flags & IFF_UP) {
910 if (ipoib_ib_dev_open(dev)) { 909 if (ipoib_ib_dev_open(dev)) {
911 ipoib_transport_dev_cleanup(dev); 910 ipoib_transport_dev_cleanup(dev);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index c0ee514396df..fddded7900d1 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1173,11 +1173,48 @@ int ipoib_add_pkey_attr(struct net_device *dev)
1173 return device_create_file(&dev->dev, &dev_attr_pkey); 1173 return device_create_file(&dev->dev, &dev_attr_pkey);
1174} 1174}
1175 1175
1176int ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca)
1177{
1178 struct ib_device_attr *device_attr;
1179 int result = -ENOMEM;
1180
1181 device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL);
1182 if (!device_attr) {
1183 printk(KERN_WARNING "%s: allocation of %zu bytes failed\n",
1184 hca->name, sizeof *device_attr);
1185 return result;
1186 }
1187
1188 result = ib_query_device(hca, device_attr);
1189 if (result) {
1190 printk(KERN_WARNING "%s: ib_query_device failed (ret = %d)\n",
1191 hca->name, result);
1192 kfree(device_attr);
1193 return result;
1194 }
1195 priv->hca_caps = device_attr->device_cap_flags;
1196
1197 kfree(device_attr);
1198
1199 if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
1200 set_bit(IPOIB_FLAG_CSUM, &priv->flags);
1201 priv->dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
1202 }
1203
1204 if (lro)
1205 priv->dev->features |= NETIF_F_LRO;
1206
1207 if (priv->dev->features & NETIF_F_SG && priv->hca_caps & IB_DEVICE_UD_TSO)
1208 priv->dev->features |= NETIF_F_TSO;
1209
1210 return 0;
1211}
1212
1213
1176static struct net_device *ipoib_add_port(const char *format, 1214static struct net_device *ipoib_add_port(const char *format,
1177 struct ib_device *hca, u8 port) 1215 struct ib_device *hca, u8 port)
1178{ 1216{
1179 struct ipoib_dev_priv *priv; 1217 struct ipoib_dev_priv *priv;
1180 struct ib_device_attr *device_attr;
1181 struct ib_port_attr attr; 1218 struct ib_port_attr attr;
1182 int result = -ENOMEM; 1219 int result = -ENOMEM;
1183 1220
@@ -1206,31 +1243,8 @@ static struct net_device *ipoib_add_port(const char *format,
1206 goto device_init_failed; 1243 goto device_init_failed;
1207 } 1244 }
1208 1245
1209 device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL); 1246 if (ipoib_set_dev_features(priv, hca))
1210 if (!device_attr) {
1211 printk(KERN_WARNING "%s: allocation of %zu bytes failed\n",
1212 hca->name, sizeof *device_attr);
1213 goto device_init_failed; 1247 goto device_init_failed;
1214 }
1215
1216 result = ib_query_device(hca, device_attr);
1217 if (result) {
1218 printk(KERN_WARNING "%s: ib_query_device failed (ret = %d)\n",
1219 hca->name, result);
1220 kfree(device_attr);
1221 goto device_init_failed;
1222 }
1223 priv->hca_caps = device_attr->device_cap_flags;
1224
1225 kfree(device_attr);
1226
1227 if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
1228 set_bit(IPOIB_FLAG_CSUM, &priv->flags);
1229 priv->dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
1230 }
1231
1232 if (lro)
1233 priv->dev->features |= NETIF_F_LRO;
1234 1248
1235 /* 1249 /*
1236 * Set the full membership bit, so that we join the right 1250 * Set the full membership bit, so that we join the right
@@ -1266,9 +1280,6 @@ static struct net_device *ipoib_add_port(const char *format,
1266 goto event_failed; 1280 goto event_failed;
1267 } 1281 }
1268 1282
1269 if (priv->dev->features & NETIF_F_SG && priv->hca_caps & IB_DEVICE_UD_TSO)
1270 priv->dev->features |= NETIF_F_TSO;
1271
1272 result = register_netdev(priv->dev); 1283 result = register_netdev(priv->dev);
1273 if (result) { 1284 if (result) {
1274 printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n", 1285 printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n",
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index b08eb56196d3..2cf1a4088718 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -93,6 +93,10 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
93 priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu; 93 priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu;
94 set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags); 94 set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags);
95 95
96 result = ipoib_set_dev_features(priv, ppriv->ca);
97 if (result)
98 goto device_init_failed;
99
96 priv->pkey = pkey; 100 priv->pkey = pkey;
97 101
98 memcpy(priv->dev->dev_addr, ppriv->dev->dev_addr, INFINIBAND_ALEN); 102 memcpy(priv->dev->dev_addr, ppriv->dev->dev_addr, INFINIBAND_ALEN);
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index f1ef33dfd8cf..1c615804ea76 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -34,7 +34,7 @@ obj-$(CONFIG_DM_CRYPT) += dm-crypt.o
34obj-$(CONFIG_DM_DELAY) += dm-delay.o 34obj-$(CONFIG_DM_DELAY) += dm-delay.o
35obj-$(CONFIG_DM_MULTIPATH) += dm-multipath.o dm-round-robin.o 35obj-$(CONFIG_DM_MULTIPATH) += dm-multipath.o dm-round-robin.o
36obj-$(CONFIG_DM_SNAPSHOT) += dm-snapshot.o 36obj-$(CONFIG_DM_SNAPSHOT) += dm-snapshot.o
37obj-$(CONFIG_DM_MIRROR) += dm-mirror.o dm-log.o 37obj-$(CONFIG_DM_MIRROR) += dm-mirror.o dm-log.o dm-region-hash.o
38obj-$(CONFIG_DM_ZERO) += dm-zero.o 38obj-$(CONFIG_DM_ZERO) += dm-zero.o
39 39
40quiet_cmd_unroll = UNROLL $@ 40quiet_cmd_unroll = UNROLL $@
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 682ef9e6acd3..ce26c84af064 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -23,7 +23,7 @@
23#include <asm/page.h> 23#include <asm/page.h>
24#include <asm/unaligned.h> 24#include <asm/unaligned.h>
25 25
26#include "dm.h" 26#include <linux/device-mapper.h>
27 27
28#define DM_MSG_PREFIX "crypt" 28#define DM_MSG_PREFIX "crypt"
29#define MESG_STR(x) x, sizeof(x) 29#define MESG_STR(x) x, sizeof(x)
@@ -56,6 +56,7 @@ struct dm_crypt_io {
56 atomic_t pending; 56 atomic_t pending;
57 int error; 57 int error;
58 sector_t sector; 58 sector_t sector;
59 struct dm_crypt_io *base_io;
59}; 60};
60 61
61struct dm_crypt_request { 62struct dm_crypt_request {
@@ -93,7 +94,6 @@ struct crypt_config {
93 94
94 struct workqueue_struct *io_queue; 95 struct workqueue_struct *io_queue;
95 struct workqueue_struct *crypt_queue; 96 struct workqueue_struct *crypt_queue;
96 wait_queue_head_t writeq;
97 97
98 /* 98 /*
99 * crypto related data 99 * crypto related data
@@ -534,6 +534,7 @@ static struct dm_crypt_io *crypt_io_alloc(struct dm_target *ti,
534 io->base_bio = bio; 534 io->base_bio = bio;
535 io->sector = sector; 535 io->sector = sector;
536 io->error = 0; 536 io->error = 0;
537 io->base_io = NULL;
537 atomic_set(&io->pending, 0); 538 atomic_set(&io->pending, 0);
538 539
539 return io; 540 return io;
@@ -547,6 +548,7 @@ static void crypt_inc_pending(struct dm_crypt_io *io)
547/* 548/*
548 * One of the bios was finished. Check for completion of 549 * One of the bios was finished. Check for completion of
549 * the whole request and correctly clean up the buffer. 550 * the whole request and correctly clean up the buffer.
551 * If base_io is set, wait for the last fragment to complete.
550 */ 552 */
551static void crypt_dec_pending(struct dm_crypt_io *io) 553static void crypt_dec_pending(struct dm_crypt_io *io)
552{ 554{
@@ -555,7 +557,14 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
555 if (!atomic_dec_and_test(&io->pending)) 557 if (!atomic_dec_and_test(&io->pending))
556 return; 558 return;
557 559
558 bio_endio(io->base_bio, io->error); 560 if (likely(!io->base_io))
561 bio_endio(io->base_bio, io->error);
562 else {
563 if (io->error && !io->base_io->error)
564 io->base_io->error = io->error;
565 crypt_dec_pending(io->base_io);
566 }
567
559 mempool_free(io, cc->io_pool); 568 mempool_free(io, cc->io_pool);
560} 569}
561 570
@@ -646,10 +655,7 @@ static void kcryptd_io_read(struct dm_crypt_io *io)
646static void kcryptd_io_write(struct dm_crypt_io *io) 655static void kcryptd_io_write(struct dm_crypt_io *io)
647{ 656{
648 struct bio *clone = io->ctx.bio_out; 657 struct bio *clone = io->ctx.bio_out;
649 struct crypt_config *cc = io->target->private;
650
651 generic_make_request(clone); 658 generic_make_request(clone);
652 wake_up(&cc->writeq);
653} 659}
654 660
655static void kcryptd_io(struct work_struct *work) 661static void kcryptd_io(struct work_struct *work)
@@ -688,7 +694,6 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io,
688 BUG_ON(io->ctx.idx_out < clone->bi_vcnt); 694 BUG_ON(io->ctx.idx_out < clone->bi_vcnt);
689 695
690 clone->bi_sector = cc->start + io->sector; 696 clone->bi_sector = cc->start + io->sector;
691 io->sector += bio_sectors(clone);
692 697
693 if (async) 698 if (async)
694 kcryptd_queue_io(io); 699 kcryptd_queue_io(io);
@@ -700,16 +705,18 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
700{ 705{
701 struct crypt_config *cc = io->target->private; 706 struct crypt_config *cc = io->target->private;
702 struct bio *clone; 707 struct bio *clone;
708 struct dm_crypt_io *new_io;
703 int crypt_finished; 709 int crypt_finished;
704 unsigned out_of_pages = 0; 710 unsigned out_of_pages = 0;
705 unsigned remaining = io->base_bio->bi_size; 711 unsigned remaining = io->base_bio->bi_size;
712 sector_t sector = io->sector;
706 int r; 713 int r;
707 714
708 /* 715 /*
709 * Prevent io from disappearing until this function completes. 716 * Prevent io from disappearing until this function completes.
710 */ 717 */
711 crypt_inc_pending(io); 718 crypt_inc_pending(io);
712 crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, io->sector); 719 crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);
713 720
714 /* 721 /*
715 * The allocated buffers can be smaller than the whole bio, 722 * The allocated buffers can be smaller than the whole bio,
@@ -726,6 +733,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
726 io->ctx.idx_out = 0; 733 io->ctx.idx_out = 0;
727 734
728 remaining -= clone->bi_size; 735 remaining -= clone->bi_size;
736 sector += bio_sectors(clone);
729 737
730 crypt_inc_pending(io); 738 crypt_inc_pending(io);
731 r = crypt_convert(cc, &io->ctx); 739 r = crypt_convert(cc, &io->ctx);
@@ -741,6 +749,8 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
741 */ 749 */
742 if (unlikely(r < 0)) 750 if (unlikely(r < 0))
743 break; 751 break;
752
753 io->sector = sector;
744 } 754 }
745 755
746 /* 756 /*
@@ -750,8 +760,33 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
750 if (unlikely(out_of_pages)) 760 if (unlikely(out_of_pages))
751 congestion_wait(WRITE, HZ/100); 761 congestion_wait(WRITE, HZ/100);
752 762
753 if (unlikely(remaining)) 763 /*
754 wait_event(cc->writeq, !atomic_read(&io->ctx.pending)); 764 * With async crypto it is unsafe to share the crypto context
765 * between fragments, so switch to a new dm_crypt_io structure.
766 */
767 if (unlikely(!crypt_finished && remaining)) {
768 new_io = crypt_io_alloc(io->target, io->base_bio,
769 sector);
770 crypt_inc_pending(new_io);
771 crypt_convert_init(cc, &new_io->ctx, NULL,
772 io->base_bio, sector);
773 new_io->ctx.idx_in = io->ctx.idx_in;
774 new_io->ctx.offset_in = io->ctx.offset_in;
775
776 /*
777 * Fragments after the first use the base_io
778 * pending count.
779 */
780 if (!io->base_io)
781 new_io->base_io = io;
782 else {
783 new_io->base_io = io->base_io;
784 crypt_inc_pending(io->base_io);
785 crypt_dec_pending(io);
786 }
787
788 io = new_io;
789 }
755 } 790 }
756 791
757 crypt_dec_pending(io); 792 crypt_dec_pending(io);
@@ -1078,7 +1113,6 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1078 goto bad_crypt_queue; 1113 goto bad_crypt_queue;
1079 } 1114 }
1080 1115
1081 init_waitqueue_head(&cc->writeq);
1082 ti->private = cc; 1116 ti->private = cc;
1083 return 0; 1117 return 0;
1084 1118
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index bdd37f881c42..848b381f1173 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -13,7 +13,8 @@
13#include <linux/bio.h> 13#include <linux/bio.h>
14#include <linux/slab.h> 14#include <linux/slab.h>
15 15
16#include "dm.h" 16#include <linux/device-mapper.h>
17
17#include "dm-bio-list.h" 18#include "dm-bio-list.h"
18 19
19#define DM_MSG_PREFIX "delay" 20#define DM_MSG_PREFIX "delay"
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index 769ab677f8e0..01590f3e0009 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -7,7 +7,6 @@
7 * This file is released under the GPL. 7 * This file is released under the GPL.
8 */ 8 */
9 9
10#include "dm.h"
11#include "dm-snap.h" 10#include "dm-snap.h"
12 11
13#include <linux/mm.h> 12#include <linux/mm.h>
@@ -105,6 +104,11 @@ struct pstore {
105 void *area; 104 void *area;
106 105
107 /* 106 /*
107 * An area of zeros used to clear the next area.
108 */
109 void *zero_area;
110
111 /*
108 * Used to keep track of which metadata area the data in 112 * Used to keep track of which metadata area the data in
109 * 'chunk' refers to. 113 * 'chunk' refers to.
110 */ 114 */
@@ -149,6 +153,13 @@ static int alloc_area(struct pstore *ps)
149 if (!ps->area) 153 if (!ps->area)
150 return r; 154 return r;
151 155
156 ps->zero_area = vmalloc(len);
157 if (!ps->zero_area) {
158 vfree(ps->area);
159 return r;
160 }
161 memset(ps->zero_area, 0, len);
162
152 return 0; 163 return 0;
153} 164}
154 165
@@ -156,6 +167,8 @@ static void free_area(struct pstore *ps)
156{ 167{
157 vfree(ps->area); 168 vfree(ps->area);
158 ps->area = NULL; 169 ps->area = NULL;
170 vfree(ps->zero_area);
171 ps->zero_area = NULL;
159} 172}
160 173
161struct mdata_req { 174struct mdata_req {
@@ -220,25 +233,41 @@ static chunk_t area_location(struct pstore *ps, chunk_t area)
220 * Read or write a metadata area. Remembering to skip the first 233 * Read or write a metadata area. Remembering to skip the first
221 * chunk which holds the header. 234 * chunk which holds the header.
222 */ 235 */
223static int area_io(struct pstore *ps, chunk_t area, int rw) 236static int area_io(struct pstore *ps, int rw)
224{ 237{
225 int r; 238 int r;
226 chunk_t chunk; 239 chunk_t chunk;
227 240
228 chunk = area_location(ps, area); 241 chunk = area_location(ps, ps->current_area);
229 242
230 r = chunk_io(ps, chunk, rw, 0); 243 r = chunk_io(ps, chunk, rw, 0);
231 if (r) 244 if (r)
232 return r; 245 return r;
233 246
234 ps->current_area = area;
235 return 0; 247 return 0;
236} 248}
237 249
238static int zero_area(struct pstore *ps, chunk_t area) 250static void zero_memory_area(struct pstore *ps)
239{ 251{
240 memset(ps->area, 0, ps->snap->chunk_size << SECTOR_SHIFT); 252 memset(ps->area, 0, ps->snap->chunk_size << SECTOR_SHIFT);
241 return area_io(ps, area, WRITE); 253}
254
255static int zero_disk_area(struct pstore *ps, chunk_t area)
256{
257 struct dm_io_region where = {
258 .bdev = ps->snap->cow->bdev,
259 .sector = ps->snap->chunk_size * area_location(ps, area),
260 .count = ps->snap->chunk_size,
261 };
262 struct dm_io_request io_req = {
263 .bi_rw = WRITE,
264 .mem.type = DM_IO_VMA,
265 .mem.ptr.vma = ps->zero_area,
266 .client = ps->io_client,
267 .notify.fn = NULL,
268 };
269
270 return dm_io(&io_req, 1, &where, NULL);
242} 271}
243 272
244static int read_header(struct pstore *ps, int *new_snapshot) 273static int read_header(struct pstore *ps, int *new_snapshot)
@@ -411,15 +440,14 @@ static int insert_exceptions(struct pstore *ps, int *full)
411 440
412static int read_exceptions(struct pstore *ps) 441static int read_exceptions(struct pstore *ps)
413{ 442{
414 chunk_t area;
415 int r, full = 1; 443 int r, full = 1;
416 444
417 /* 445 /*
418 * Keeping reading chunks and inserting exceptions until 446 * Keeping reading chunks and inserting exceptions until
419 * we find a partially full area. 447 * we find a partially full area.
420 */ 448 */
421 for (area = 0; full; area++) { 449 for (ps->current_area = 0; full; ps->current_area++) {
422 r = area_io(ps, area, READ); 450 r = area_io(ps, READ);
423 if (r) 451 if (r)
424 return r; 452 return r;
425 453
@@ -428,6 +456,8 @@ static int read_exceptions(struct pstore *ps)
428 return r; 456 return r;
429 } 457 }
430 458
459 ps->current_area--;
460
431 return 0; 461 return 0;
432} 462}
433 463
@@ -486,12 +516,13 @@ static int persistent_read_metadata(struct exception_store *store)
486 return r; 516 return r;
487 } 517 }
488 518
489 r = zero_area(ps, 0); 519 ps->current_area = 0;
520 zero_memory_area(ps);
521 r = zero_disk_area(ps, 0);
490 if (r) { 522 if (r) {
491 DMWARN("zero_area(0) failed"); 523 DMWARN("zero_disk_area(0) failed");
492 return r; 524 return r;
493 } 525 }
494
495 } else { 526 } else {
496 /* 527 /*
497 * Sanity checks. 528 * Sanity checks.
@@ -551,7 +582,6 @@ static void persistent_commit(struct exception_store *store,
551 void (*callback) (void *, int success), 582 void (*callback) (void *, int success),
552 void *callback_context) 583 void *callback_context)
553{ 584{
554 int r;
555 unsigned int i; 585 unsigned int i;
556 struct pstore *ps = get_info(store); 586 struct pstore *ps = get_info(store);
557 struct disk_exception de; 587 struct disk_exception de;
@@ -572,33 +602,41 @@ static void persistent_commit(struct exception_store *store,
572 cb->context = callback_context; 602 cb->context = callback_context;
573 603
574 /* 604 /*
575 * If there are no more exceptions in flight, or we have 605 * If there are exceptions in flight and we have not yet
576 * filled this metadata area we commit the exceptions to 606 * filled this metadata area there's nothing more to do.
577 * disk.
578 */ 607 */
579 if (atomic_dec_and_test(&ps->pending_count) || 608 if (!atomic_dec_and_test(&ps->pending_count) &&
580 (ps->current_committed == ps->exceptions_per_area)) { 609 (ps->current_committed != ps->exceptions_per_area))
581 r = area_io(ps, ps->current_area, WRITE); 610 return;
582 if (r)
583 ps->valid = 0;
584 611
585 /* 612 /*
586 * Have we completely filled the current area ? 613 * If we completely filled the current area, then wipe the next one.
587 */ 614 */
588 if (ps->current_committed == ps->exceptions_per_area) { 615 if ((ps->current_committed == ps->exceptions_per_area) &&
589 ps->current_committed = 0; 616 zero_disk_area(ps, ps->current_area + 1))
590 r = zero_area(ps, ps->current_area + 1); 617 ps->valid = 0;
591 if (r)
592 ps->valid = 0;
593 }
594 618
595 for (i = 0; i < ps->callback_count; i++) { 619 /*
596 cb = ps->callbacks + i; 620 * Commit exceptions to disk.
597 cb->callback(cb->context, r == 0 ? 1 : 0); 621 */
598 } 622 if (ps->valid && area_io(ps, WRITE))
623 ps->valid = 0;
599 624
600 ps->callback_count = 0; 625 /*
626 * Advance to the next area if this one is full.
627 */
628 if (ps->current_committed == ps->exceptions_per_area) {
629 ps->current_committed = 0;
630 ps->current_area++;
631 zero_memory_area(ps);
601 } 632 }
633
634 for (i = 0; i < ps->callback_count; i++) {
635 cb = ps->callbacks + i;
636 cb->callback(cb->context, ps->valid);
637 }
638
639 ps->callback_count = 0;
602} 640}
603 641
604static void persistent_drop(struct exception_store *store) 642static void persistent_drop(struct exception_store *store)
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 4789c42d9a3a..2fd6d4450637 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -5,7 +5,7 @@
5 * This file is released under the GPL. 5 * This file is released under the GPL.
6 */ 6 */
7 7
8#include "dm.h" 8#include <linux/device-mapper.h>
9 9
10#include <linux/bio.h> 10#include <linux/bio.h>
11#include <linux/mempool.h> 11#include <linux/mempool.h>
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 996802b8a452..3073618269ea 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -22,6 +22,7 @@
22#include <linux/vmalloc.h> 22#include <linux/vmalloc.h>
23#include <linux/workqueue.h> 23#include <linux/workqueue.h>
24#include <linux/mutex.h> 24#include <linux/mutex.h>
25#include <linux/device-mapper.h>
25#include <linux/dm-kcopyd.h> 26#include <linux/dm-kcopyd.h>
26 27
27#include "dm.h" 28#include "dm.h"
@@ -268,6 +269,17 @@ static void push(struct list_head *jobs, struct kcopyd_job *job)
268 spin_unlock_irqrestore(&kc->job_lock, flags); 269 spin_unlock_irqrestore(&kc->job_lock, flags);
269} 270}
270 271
272
273static void push_head(struct list_head *jobs, struct kcopyd_job *job)
274{
275 unsigned long flags;
276 struct dm_kcopyd_client *kc = job->kc;
277
278 spin_lock_irqsave(&kc->job_lock, flags);
279 list_add(&job->list, jobs);
280 spin_unlock_irqrestore(&kc->job_lock, flags);
281}
282
271/* 283/*
272 * These three functions process 1 item from the corresponding 284 * These three functions process 1 item from the corresponding
273 * job list. 285 * job list.
@@ -398,7 +410,7 @@ static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc,
398 * We couldn't service this job ATM, so 410 * We couldn't service this job ATM, so
399 * push this job back onto the list. 411 * push this job back onto the list.
400 */ 412 */
401 push(jobs, job); 413 push_head(jobs, job);
402 break; 414 break;
403 } 415 }
404 416
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 6449bcdf84ca..1b29e9136758 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -5,12 +5,12 @@
5 */ 5 */
6 6
7#include "dm.h" 7#include "dm.h"
8
9#include <linux/module.h> 8#include <linux/module.h>
10#include <linux/init.h> 9#include <linux/init.h>
11#include <linux/blkdev.h> 10#include <linux/blkdev.h>
12#include <linux/bio.h> 11#include <linux/bio.h>
13#include <linux/slab.h> 12#include <linux/slab.h>
13#include <linux/device-mapper.h>
14 14
15#define DM_MSG_PREFIX "linear" 15#define DM_MSG_PREFIX "linear"
16 16
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index 5b48478c79f5..a8c0fc79ca78 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -12,7 +12,7 @@
12#include <linux/dm-io.h> 12#include <linux/dm-io.h>
13#include <linux/dm-dirty-log.h> 13#include <linux/dm-dirty-log.h>
14 14
15#include "dm.h" 15#include <linux/device-mapper.h>
16 16
17#define DM_MSG_PREFIX "dirty region log" 17#define DM_MSG_PREFIX "dirty region log"
18 18
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 9bf3460c5540..abf6e8cfaedb 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -5,7 +5,8 @@
5 * This file is released under the GPL. 5 * This file is released under the GPL.
6 */ 6 */
7 7
8#include "dm.h" 8#include <linux/device-mapper.h>
9
9#include "dm-path-selector.h" 10#include "dm-path-selector.h"
10#include "dm-bio-list.h" 11#include "dm-bio-list.h"
11#include "dm-bio-record.h" 12#include "dm-bio-record.h"
diff --git a/drivers/md/dm-path-selector.c b/drivers/md/dm-path-selector.c
index ca1bb636a3e4..96ea226155b1 100644
--- a/drivers/md/dm-path-selector.c
+++ b/drivers/md/dm-path-selector.c
@@ -9,7 +9,8 @@
9 * Path selector registration. 9 * Path selector registration.
10 */ 10 */
11 11
12#include "dm.h" 12#include <linux/device-mapper.h>
13
13#include "dm-path-selector.h" 14#include "dm-path-selector.h"
14 15
15#include <linux/slab.h> 16#include <linux/slab.h>
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 29913e42c4ab..92dcc06832a4 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -1,30 +1,30 @@
1/* 1/*
2 * Copyright (C) 2003 Sistina Software Limited. 2 * Copyright (C) 2003 Sistina Software Limited.
3 * Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved.
3 * 4 *
4 * This file is released under the GPL. 5 * This file is released under the GPL.
5 */ 6 */
6 7
7#include "dm.h"
8#include "dm-bio-list.h" 8#include "dm-bio-list.h"
9#include "dm-bio-record.h" 9#include "dm-bio-record.h"
10 10
11#include <linux/ctype.h>
12#include <linux/init.h> 11#include <linux/init.h>
13#include <linux/mempool.h> 12#include <linux/mempool.h>
14#include <linux/module.h> 13#include <linux/module.h>
15#include <linux/pagemap.h> 14#include <linux/pagemap.h>
16#include <linux/slab.h> 15#include <linux/slab.h>
17#include <linux/time.h>
18#include <linux/vmalloc.h>
19#include <linux/workqueue.h> 16#include <linux/workqueue.h>
20#include <linux/log2.h> 17#include <linux/device-mapper.h>
21#include <linux/hardirq.h>
22#include <linux/dm-io.h> 18#include <linux/dm-io.h>
23#include <linux/dm-dirty-log.h> 19#include <linux/dm-dirty-log.h>
24#include <linux/dm-kcopyd.h> 20#include <linux/dm-kcopyd.h>
21#include <linux/dm-region-hash.h>
25 22
26#define DM_MSG_PREFIX "raid1" 23#define DM_MSG_PREFIX "raid1"
24
25#define MAX_RECOVERY 1 /* Maximum number of regions recovered in parallel. */
27#define DM_IO_PAGES 64 26#define DM_IO_PAGES 64
27#define DM_KCOPYD_PAGES 64
28 28
29#define DM_RAID1_HANDLE_ERRORS 0x01 29#define DM_RAID1_HANDLE_ERRORS 0x01
30#define errors_handled(p) ((p)->features & DM_RAID1_HANDLE_ERRORS) 30#define errors_handled(p) ((p)->features & DM_RAID1_HANDLE_ERRORS)
@@ -32,87 +32,6 @@
32static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped); 32static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);
33 33
34/*----------------------------------------------------------------- 34/*-----------------------------------------------------------------
35 * Region hash
36 *
37 * The mirror splits itself up into discrete regions. Each
38 * region can be in one of three states: clean, dirty,
39 * nosync. There is no need to put clean regions in the hash.
40 *
41 * In addition to being present in the hash table a region _may_
42 * be present on one of three lists.
43 *
44 * clean_regions: Regions on this list have no io pending to
45 * them, they are in sync, we are no longer interested in them,
46 * they are dull. rh_update_states() will remove them from the
47 * hash table.
48 *
49 * quiesced_regions: These regions have been spun down, ready
50 * for recovery. rh_recovery_start() will remove regions from
51 * this list and hand them to kmirrord, which will schedule the
52 * recovery io with kcopyd.
53 *
54 * recovered_regions: Regions that kcopyd has successfully
55 * recovered. rh_update_states() will now schedule any delayed
56 * io, up the recovery_count, and remove the region from the
57 * hash.
58 *
59 * There are 2 locks:
60 * A rw spin lock 'hash_lock' protects just the hash table,
61 * this is never held in write mode from interrupt context,
62 * which I believe means that we only have to disable irqs when
63 * doing a write lock.
64 *
65 * An ordinary spin lock 'region_lock' that protects the three
66 * lists in the region_hash, with the 'state', 'list' and
67 * 'bhs_delayed' fields of the regions. This is used from irq
68 * context, so all other uses will have to suspend local irqs.
69 *---------------------------------------------------------------*/
70struct mirror_set;
71struct region_hash {
72 struct mirror_set *ms;
73 uint32_t region_size;
74 unsigned region_shift;
75
76 /* holds persistent region state */
77 struct dm_dirty_log *log;
78
79 /* hash table */
80 rwlock_t hash_lock;
81 mempool_t *region_pool;
82 unsigned int mask;
83 unsigned int nr_buckets;
84 struct list_head *buckets;
85
86 spinlock_t region_lock;
87 atomic_t recovery_in_flight;
88 struct semaphore recovery_count;
89 struct list_head clean_regions;
90 struct list_head quiesced_regions;
91 struct list_head recovered_regions;
92 struct list_head failed_recovered_regions;
93};
94
95enum {
96 RH_CLEAN,
97 RH_DIRTY,
98 RH_NOSYNC,
99 RH_RECOVERING
100};
101
102struct region {
103 struct region_hash *rh; /* FIXME: can we get rid of this ? */
104 region_t key;
105 int state;
106
107 struct list_head hash_list;
108 struct list_head list;
109
110 atomic_t pending;
111 struct bio_list delayed_bios;
112};
113
114
115/*-----------------------------------------------------------------
116 * Mirror set structures. 35 * Mirror set structures.
117 *---------------------------------------------------------------*/ 36 *---------------------------------------------------------------*/
118enum dm_raid1_error { 37enum dm_raid1_error {
@@ -132,8 +51,7 @@ struct mirror {
132struct mirror_set { 51struct mirror_set {
133 struct dm_target *ti; 52 struct dm_target *ti;
134 struct list_head list; 53 struct list_head list;
135 struct region_hash rh; 54
136 struct dm_kcopyd_client *kcopyd_client;
137 uint64_t features; 55 uint64_t features;
138 56
139 spinlock_t lock; /* protects the lists */ 57 spinlock_t lock; /* protects the lists */
@@ -141,6 +59,8 @@ struct mirror_set {
141 struct bio_list writes; 59 struct bio_list writes;
142 struct bio_list failures; 60 struct bio_list failures;
143 61
62 struct dm_region_hash *rh;
63 struct dm_kcopyd_client *kcopyd_client;
144 struct dm_io_client *io_client; 64 struct dm_io_client *io_client;
145 mempool_t *read_record_pool; 65 mempool_t *read_record_pool;
146 66
@@ -159,25 +79,14 @@ struct mirror_set {
159 79
160 struct work_struct trigger_event; 80 struct work_struct trigger_event;
161 81
162 unsigned int nr_mirrors; 82 unsigned nr_mirrors;
163 struct mirror mirror[0]; 83 struct mirror mirror[0];
164}; 84};
165 85
166/* 86static void wakeup_mirrord(void *context)
167 * Conversion fns
168 */
169static inline region_t bio_to_region(struct region_hash *rh, struct bio *bio)
170{
171 return (bio->bi_sector - rh->ms->ti->begin) >> rh->region_shift;
172}
173
174static inline sector_t region_to_sector(struct region_hash *rh, region_t region)
175{ 87{
176 return region << rh->region_shift; 88 struct mirror_set *ms = context;
177}
178 89
179static void wake(struct mirror_set *ms)
180{
181 queue_work(ms->kmirrord_wq, &ms->kmirrord_work); 90 queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
182} 91}
183 92
@@ -186,7 +95,7 @@ static void delayed_wake_fn(unsigned long data)
186 struct mirror_set *ms = (struct mirror_set *) data; 95 struct mirror_set *ms = (struct mirror_set *) data;
187 96
188 clear_bit(0, &ms->timer_pending); 97 clear_bit(0, &ms->timer_pending);
189 wake(ms); 98 wakeup_mirrord(ms);
190} 99}
191 100
192static void delayed_wake(struct mirror_set *ms) 101static void delayed_wake(struct mirror_set *ms)
@@ -200,473 +109,34 @@ static void delayed_wake(struct mirror_set *ms)
200 add_timer(&ms->timer); 109 add_timer(&ms->timer);
201} 110}
202 111
203/* FIXME move this */ 112static void wakeup_all_recovery_waiters(void *context)
204static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw);
205
206#define MIN_REGIONS 64
207#define MAX_RECOVERY 1
208static int rh_init(struct region_hash *rh, struct mirror_set *ms,
209 struct dm_dirty_log *log, uint32_t region_size,
210 region_t nr_regions)
211{ 113{
212 unsigned int nr_buckets, max_buckets; 114 wake_up_all(&_kmirrord_recovery_stopped);
213 size_t i;
214
215 /*
216 * Calculate a suitable number of buckets for our hash
217 * table.
218 */
219 max_buckets = nr_regions >> 6;
220 for (nr_buckets = 128u; nr_buckets < max_buckets; nr_buckets <<= 1)
221 ;
222 nr_buckets >>= 1;
223
224 rh->ms = ms;
225 rh->log = log;
226 rh->region_size = region_size;
227 rh->region_shift = ffs(region_size) - 1;
228 rwlock_init(&rh->hash_lock);
229 rh->mask = nr_buckets - 1;
230 rh->nr_buckets = nr_buckets;
231
232 rh->buckets = vmalloc(nr_buckets * sizeof(*rh->buckets));
233 if (!rh->buckets) {
234 DMERR("unable to allocate region hash memory");
235 return -ENOMEM;
236 }
237
238 for (i = 0; i < nr_buckets; i++)
239 INIT_LIST_HEAD(rh->buckets + i);
240
241 spin_lock_init(&rh->region_lock);
242 sema_init(&rh->recovery_count, 0);
243 atomic_set(&rh->recovery_in_flight, 0);
244 INIT_LIST_HEAD(&rh->clean_regions);
245 INIT_LIST_HEAD(&rh->quiesced_regions);
246 INIT_LIST_HEAD(&rh->recovered_regions);
247 INIT_LIST_HEAD(&rh->failed_recovered_regions);
248
249 rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS,
250 sizeof(struct region));
251 if (!rh->region_pool) {
252 vfree(rh->buckets);
253 rh->buckets = NULL;
254 return -ENOMEM;
255 }
256
257 return 0;
258} 115}
259 116
260static void rh_exit(struct region_hash *rh) 117static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
261{
262 unsigned int h;
263 struct region *reg, *nreg;
264
265 BUG_ON(!list_empty(&rh->quiesced_regions));
266 for (h = 0; h < rh->nr_buckets; h++) {
267 list_for_each_entry_safe(reg, nreg, rh->buckets + h, hash_list) {
268 BUG_ON(atomic_read(&reg->pending));
269 mempool_free(reg, rh->region_pool);
270 }
271 }
272
273 if (rh->log)
274 dm_dirty_log_destroy(rh->log);
275 if (rh->region_pool)
276 mempool_destroy(rh->region_pool);
277 vfree(rh->buckets);
278}
279
280#define RH_HASH_MULT 2654435387U
281
282static inline unsigned int rh_hash(struct region_hash *rh, region_t region)
283{
284 return (unsigned int) ((region * RH_HASH_MULT) >> 12) & rh->mask;
285}
286
287static struct region *__rh_lookup(struct region_hash *rh, region_t region)
288{
289 struct region *reg;
290
291 list_for_each_entry (reg, rh->buckets + rh_hash(rh, region), hash_list)
292 if (reg->key == region)
293 return reg;
294
295 return NULL;
296}
297
298static void __rh_insert(struct region_hash *rh, struct region *reg)
299{
300 unsigned int h = rh_hash(rh, reg->key);
301 list_add(&reg->hash_list, rh->buckets + h);
302}
303
304static struct region *__rh_alloc(struct region_hash *rh, region_t region)
305{
306 struct region *reg, *nreg;
307
308 read_unlock(&rh->hash_lock);
309 nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC);
310 if (unlikely(!nreg))
311 nreg = kmalloc(sizeof(struct region), GFP_NOIO);
312 nreg->state = rh->log->type->in_sync(rh->log, region, 1) ?
313 RH_CLEAN : RH_NOSYNC;
314 nreg->rh = rh;
315 nreg->key = region;
316
317 INIT_LIST_HEAD(&nreg->list);
318
319 atomic_set(&nreg->pending, 0);
320 bio_list_init(&nreg->delayed_bios);
321 write_lock_irq(&rh->hash_lock);
322
323 reg = __rh_lookup(rh, region);
324 if (reg)
325 /* we lost the race */
326 mempool_free(nreg, rh->region_pool);
327
328 else {
329 __rh_insert(rh, nreg);
330 if (nreg->state == RH_CLEAN) {
331 spin_lock(&rh->region_lock);
332 list_add(&nreg->list, &rh->clean_regions);
333 spin_unlock(&rh->region_lock);
334 }
335 reg = nreg;
336 }
337 write_unlock_irq(&rh->hash_lock);
338 read_lock(&rh->hash_lock);
339
340 return reg;
341}
342
343static inline struct region *__rh_find(struct region_hash *rh, region_t region)
344{
345 struct region *reg;
346
347 reg = __rh_lookup(rh, region);
348 if (!reg)
349 reg = __rh_alloc(rh, region);
350
351 return reg;
352}
353
354static int rh_state(struct region_hash *rh, region_t region, int may_block)
355{
356 int r;
357 struct region *reg;
358
359 read_lock(&rh->hash_lock);
360 reg = __rh_lookup(rh, region);
361 read_unlock(&rh->hash_lock);
362
363 if (reg)
364 return reg->state;
365
366 /*
367 * The region wasn't in the hash, so we fall back to the
368 * dirty log.
369 */
370 r = rh->log->type->in_sync(rh->log, region, may_block);
371
372 /*
373 * Any error from the dirty log (eg. -EWOULDBLOCK) gets
374 * taken as a RH_NOSYNC
375 */
376 return r == 1 ? RH_CLEAN : RH_NOSYNC;
377}
378
379static inline int rh_in_sync(struct region_hash *rh,
380 region_t region, int may_block)
381{
382 int state = rh_state(rh, region, may_block);
383 return state == RH_CLEAN || state == RH_DIRTY;
384}
385
386static void dispatch_bios(struct mirror_set *ms, struct bio_list *bio_list)
387{
388 struct bio *bio;
389
390 while ((bio = bio_list_pop(bio_list))) {
391 queue_bio(ms, bio, WRITE);
392 }
393}
394
395static void complete_resync_work(struct region *reg, int success)
396{
397 struct region_hash *rh = reg->rh;
398
399 rh->log->type->set_region_sync(rh->log, reg->key, success);
400
401 /*
402 * Dispatch the bios before we call 'wake_up_all'.
403 * This is important because if we are suspending,
404 * we want to know that recovery is complete and
405 * the work queue is flushed. If we wake_up_all
406 * before we dispatch_bios (queue bios and call wake()),
407 * then we risk suspending before the work queue
408 * has been properly flushed.
409 */
410 dispatch_bios(rh->ms, &reg->delayed_bios);
411 if (atomic_dec_and_test(&rh->recovery_in_flight))
412 wake_up_all(&_kmirrord_recovery_stopped);
413 up(&rh->recovery_count);
414}
415
416static void rh_update_states(struct region_hash *rh)
417{
418 struct region *reg, *next;
419
420 LIST_HEAD(clean);
421 LIST_HEAD(recovered);
422 LIST_HEAD(failed_recovered);
423
424 /*
425 * Quickly grab the lists.
426 */
427 write_lock_irq(&rh->hash_lock);
428 spin_lock(&rh->region_lock);
429 if (!list_empty(&rh->clean_regions)) {
430 list_splice_init(&rh->clean_regions, &clean);
431
432 list_for_each_entry(reg, &clean, list)
433 list_del(&reg->hash_list);
434 }
435
436 if (!list_empty(&rh->recovered_regions)) {
437 list_splice_init(&rh->recovered_regions, &recovered);
438
439 list_for_each_entry (reg, &recovered, list)
440 list_del(&reg->hash_list);
441 }
442
443 if (!list_empty(&rh->failed_recovered_regions)) {
444 list_splice_init(&rh->failed_recovered_regions,
445 &failed_recovered);
446
447 list_for_each_entry(reg, &failed_recovered, list)
448 list_del(&reg->hash_list);
449 }
450
451 spin_unlock(&rh->region_lock);
452 write_unlock_irq(&rh->hash_lock);
453
454 /*
455 * All the regions on the recovered and clean lists have
456 * now been pulled out of the system, so no need to do
457 * any more locking.
458 */
459 list_for_each_entry_safe (reg, next, &recovered, list) {
460 rh->log->type->clear_region(rh->log, reg->key);
461 complete_resync_work(reg, 1);
462 mempool_free(reg, rh->region_pool);
463 }
464
465 list_for_each_entry_safe(reg, next, &failed_recovered, list) {
466 complete_resync_work(reg, errors_handled(rh->ms) ? 0 : 1);
467 mempool_free(reg, rh->region_pool);
468 }
469
470 list_for_each_entry_safe(reg, next, &clean, list) {
471 rh->log->type->clear_region(rh->log, reg->key);
472 mempool_free(reg, rh->region_pool);
473 }
474
475 rh->log->type->flush(rh->log);
476}
477
478static void rh_inc(struct region_hash *rh, region_t region)
479{
480 struct region *reg;
481
482 read_lock(&rh->hash_lock);
483 reg = __rh_find(rh, region);
484
485 spin_lock_irq(&rh->region_lock);
486 atomic_inc(&reg->pending);
487
488 if (reg->state == RH_CLEAN) {
489 reg->state = RH_DIRTY;
490 list_del_init(&reg->list); /* take off the clean list */
491 spin_unlock_irq(&rh->region_lock);
492
493 rh->log->type->mark_region(rh->log, reg->key);
494 } else
495 spin_unlock_irq(&rh->region_lock);
496
497
498 read_unlock(&rh->hash_lock);
499}
500
501static void rh_inc_pending(struct region_hash *rh, struct bio_list *bios)
502{
503 struct bio *bio;
504
505 for (bio = bios->head; bio; bio = bio->bi_next)
506 rh_inc(rh, bio_to_region(rh, bio));
507}
508
509static void rh_dec(struct region_hash *rh, region_t region)
510{ 118{
511 unsigned long flags; 119 unsigned long flags;
512 struct region *reg;
513 int should_wake = 0; 120 int should_wake = 0;
121 struct bio_list *bl;
514 122
515 read_lock(&rh->hash_lock); 123 bl = (rw == WRITE) ? &ms->writes : &ms->reads;
516 reg = __rh_lookup(rh, region); 124 spin_lock_irqsave(&ms->lock, flags);
517 read_unlock(&rh->hash_lock); 125 should_wake = !(bl->head);
518 126 bio_list_add(bl, bio);
519 spin_lock_irqsave(&rh->region_lock, flags); 127 spin_unlock_irqrestore(&ms->lock, flags);
520 if (atomic_dec_and_test(&reg->pending)) {
521 /*
522 * There is no pending I/O for this region.
523 * We can move the region to corresponding list for next action.
524 * At this point, the region is not yet connected to any list.
525 *
526 * If the state is RH_NOSYNC, the region should be kept off
527 * from clean list.
528 * The hash entry for RH_NOSYNC will remain in memory
529 * until the region is recovered or the map is reloaded.
530 */
531
532 /* do nothing for RH_NOSYNC */
533 if (reg->state == RH_RECOVERING) {
534 list_add_tail(&reg->list, &rh->quiesced_regions);
535 } else if (reg->state == RH_DIRTY) {
536 reg->state = RH_CLEAN;
537 list_add(&reg->list, &rh->clean_regions);
538 }
539 should_wake = 1;
540 }
541 spin_unlock_irqrestore(&rh->region_lock, flags);
542 128
543 if (should_wake) 129 if (should_wake)
544 wake(rh->ms); 130 wakeup_mirrord(ms);
545}
546
547/*
548 * Starts quiescing a region in preparation for recovery.
549 */
550static int __rh_recovery_prepare(struct region_hash *rh)
551{
552 int r;
553 struct region *reg;
554 region_t region;
555
556 /*
557 * Ask the dirty log what's next.
558 */
559 r = rh->log->type->get_resync_work(rh->log, &region);
560 if (r <= 0)
561 return r;
562
563 /*
564 * Get this region, and start it quiescing by setting the
565 * recovering flag.
566 */
567 read_lock(&rh->hash_lock);
568 reg = __rh_find(rh, region);
569 read_unlock(&rh->hash_lock);
570
571 spin_lock_irq(&rh->region_lock);
572 reg->state = RH_RECOVERING;
573
574 /* Already quiesced ? */
575 if (atomic_read(&reg->pending))
576 list_del_init(&reg->list);
577 else
578 list_move(&reg->list, &rh->quiesced_regions);
579
580 spin_unlock_irq(&rh->region_lock);
581
582 return 1;
583}
584
585static void rh_recovery_prepare(struct region_hash *rh)
586{
587 /* Extra reference to avoid race with rh_stop_recovery */
588 atomic_inc(&rh->recovery_in_flight);
589
590 while (!down_trylock(&rh->recovery_count)) {
591 atomic_inc(&rh->recovery_in_flight);
592 if (__rh_recovery_prepare(rh) <= 0) {
593 atomic_dec(&rh->recovery_in_flight);
594 up(&rh->recovery_count);
595 break;
596 }
597 }
598
599 /* Drop the extra reference */
600 if (atomic_dec_and_test(&rh->recovery_in_flight))
601 wake_up_all(&_kmirrord_recovery_stopped);
602}
603
604/*
605 * Returns any quiesced regions.
606 */
607static struct region *rh_recovery_start(struct region_hash *rh)
608{
609 struct region *reg = NULL;
610
611 spin_lock_irq(&rh->region_lock);
612 if (!list_empty(&rh->quiesced_regions)) {
613 reg = list_entry(rh->quiesced_regions.next,
614 struct region, list);
615 list_del_init(&reg->list); /* remove from the quiesced list */
616 }
617 spin_unlock_irq(&rh->region_lock);
618
619 return reg;
620}
621
622static void rh_recovery_end(struct region *reg, int success)
623{
624 struct region_hash *rh = reg->rh;
625
626 spin_lock_irq(&rh->region_lock);
627 if (success)
628 list_add(&reg->list, &reg->rh->recovered_regions);
629 else {
630 reg->state = RH_NOSYNC;
631 list_add(&reg->list, &reg->rh->failed_recovered_regions);
632 }
633 spin_unlock_irq(&rh->region_lock);
634
635 wake(rh->ms);
636} 131}
637 132
638static int rh_flush(struct region_hash *rh) 133static void dispatch_bios(void *context, struct bio_list *bio_list)
639{ 134{
640 return rh->log->type->flush(rh->log); 135 struct mirror_set *ms = context;
641} 136 struct bio *bio;
642
643static void rh_delay(struct region_hash *rh, struct bio *bio)
644{
645 struct region *reg;
646
647 read_lock(&rh->hash_lock);
648 reg = __rh_find(rh, bio_to_region(rh, bio));
649 bio_list_add(&reg->delayed_bios, bio);
650 read_unlock(&rh->hash_lock);
651}
652
653static void rh_stop_recovery(struct region_hash *rh)
654{
655 int i;
656
657 /* wait for any recovering regions */
658 for (i = 0; i < MAX_RECOVERY; i++)
659 down(&rh->recovery_count);
660}
661
662static void rh_start_recovery(struct region_hash *rh)
663{
664 int i;
665
666 for (i = 0; i < MAX_RECOVERY; i++)
667 up(&rh->recovery_count);
668 137
669 wake(rh->ms); 138 while ((bio = bio_list_pop(bio_list)))
139 queue_bio(ms, bio, WRITE);
670} 140}
671 141
672#define MIN_READ_RECORDS 20 142#define MIN_READ_RECORDS 20
@@ -776,8 +246,8 @@ out:
776static void recovery_complete(int read_err, unsigned long write_err, 246static void recovery_complete(int read_err, unsigned long write_err,
777 void *context) 247 void *context)
778{ 248{
779 struct region *reg = (struct region *)context; 249 struct dm_region *reg = context;
780 struct mirror_set *ms = reg->rh->ms; 250 struct mirror_set *ms = dm_rh_region_context(reg);
781 int m, bit = 0; 251 int m, bit = 0;
782 252
783 if (read_err) { 253 if (read_err) {
@@ -803,31 +273,33 @@ static void recovery_complete(int read_err, unsigned long write_err,
803 } 273 }
804 } 274 }
805 275
806 rh_recovery_end(reg, !(read_err || write_err)); 276 dm_rh_recovery_end(reg, !(read_err || write_err));
807} 277}
808 278
809static int recover(struct mirror_set *ms, struct region *reg) 279static int recover(struct mirror_set *ms, struct dm_region *reg)
810{ 280{
811 int r; 281 int r;
812 unsigned int i; 282 unsigned i;
813 struct dm_io_region from, to[DM_KCOPYD_MAX_REGIONS], *dest; 283 struct dm_io_region from, to[DM_KCOPYD_MAX_REGIONS], *dest;
814 struct mirror *m; 284 struct mirror *m;
815 unsigned long flags = 0; 285 unsigned long flags = 0;
286 region_t key = dm_rh_get_region_key(reg);
287 sector_t region_size = dm_rh_get_region_size(ms->rh);
816 288
817 /* fill in the source */ 289 /* fill in the source */
818 m = get_default_mirror(ms); 290 m = get_default_mirror(ms);
819 from.bdev = m->dev->bdev; 291 from.bdev = m->dev->bdev;
820 from.sector = m->offset + region_to_sector(reg->rh, reg->key); 292 from.sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
821 if (reg->key == (ms->nr_regions - 1)) { 293 if (key == (ms->nr_regions - 1)) {
822 /* 294 /*
823 * The final region may be smaller than 295 * The final region may be smaller than
824 * region_size. 296 * region_size.
825 */ 297 */
826 from.count = ms->ti->len & (reg->rh->region_size - 1); 298 from.count = ms->ti->len & (region_size - 1);
827 if (!from.count) 299 if (!from.count)
828 from.count = reg->rh->region_size; 300 from.count = region_size;
829 } else 301 } else
830 from.count = reg->rh->region_size; 302 from.count = region_size;
831 303
832 /* fill in the destinations */ 304 /* fill in the destinations */
833 for (i = 0, dest = to; i < ms->nr_mirrors; i++) { 305 for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
@@ -836,7 +308,7 @@ static int recover(struct mirror_set *ms, struct region *reg)
836 308
837 m = ms->mirror + i; 309 m = ms->mirror + i;
838 dest->bdev = m->dev->bdev; 310 dest->bdev = m->dev->bdev;
839 dest->sector = m->offset + region_to_sector(reg->rh, reg->key); 311 dest->sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
840 dest->count = from.count; 312 dest->count = from.count;
841 dest++; 313 dest++;
842 } 314 }
@@ -853,22 +325,22 @@ static int recover(struct mirror_set *ms, struct region *reg)
853 325
854static void do_recovery(struct mirror_set *ms) 326static void do_recovery(struct mirror_set *ms)
855{ 327{
328 struct dm_region *reg;
329 struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
856 int r; 330 int r;
857 struct region *reg;
858 struct dm_dirty_log *log = ms->rh.log;
859 331
860 /* 332 /*
861 * Start quiescing some regions. 333 * Start quiescing some regions.
862 */ 334 */
863 rh_recovery_prepare(&ms->rh); 335 dm_rh_recovery_prepare(ms->rh);
864 336
865 /* 337 /*
866 * Copy any already quiesced regions. 338 * Copy any already quiesced regions.
867 */ 339 */
868 while ((reg = rh_recovery_start(&ms->rh))) { 340 while ((reg = dm_rh_recovery_start(ms->rh))) {
869 r = recover(ms, reg); 341 r = recover(ms, reg);
870 if (r) 342 if (r)
871 rh_recovery_end(reg, 0); 343 dm_rh_recovery_end(reg, 0);
872 } 344 }
873 345
874 /* 346 /*
@@ -909,9 +381,10 @@ static int default_ok(struct mirror *m)
909 381
910static int mirror_available(struct mirror_set *ms, struct bio *bio) 382static int mirror_available(struct mirror_set *ms, struct bio *bio)
911{ 383{
912 region_t region = bio_to_region(&ms->rh, bio); 384 struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
385 region_t region = dm_rh_bio_to_region(ms->rh, bio);
913 386
914 if (ms->rh.log->type->in_sync(ms->rh.log, region, 0)) 387 if (log->type->in_sync(log, region, 0))
915 return choose_mirror(ms, bio->bi_sector) ? 1 : 0; 388 return choose_mirror(ms, bio->bi_sector) ? 1 : 0;
916 389
917 return 0; 390 return 0;
@@ -985,7 +458,14 @@ static void read_async_bio(struct mirror *m, struct bio *bio)
985 458
986 map_region(&io, m, bio); 459 map_region(&io, m, bio);
987 bio_set_m(bio, m); 460 bio_set_m(bio, m);
988 (void) dm_io(&io_req, 1, &io, NULL); 461 BUG_ON(dm_io(&io_req, 1, &io, NULL));
462}
463
464static inline int region_in_sync(struct mirror_set *ms, region_t region,
465 int may_block)
466{
467 int state = dm_rh_get_state(ms->rh, region, may_block);
468 return state == DM_RH_CLEAN || state == DM_RH_DIRTY;
989} 469}
990 470
991static void do_reads(struct mirror_set *ms, struct bio_list *reads) 471static void do_reads(struct mirror_set *ms, struct bio_list *reads)
@@ -995,13 +475,13 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
995 struct mirror *m; 475 struct mirror *m;
996 476
997 while ((bio = bio_list_pop(reads))) { 477 while ((bio = bio_list_pop(reads))) {
998 region = bio_to_region(&ms->rh, bio); 478 region = dm_rh_bio_to_region(ms->rh, bio);
999 m = get_default_mirror(ms); 479 m = get_default_mirror(ms);
1000 480
1001 /* 481 /*
1002 * We can only read balance if the region is in sync. 482 * We can only read balance if the region is in sync.
1003 */ 483 */
1004 if (likely(rh_in_sync(&ms->rh, region, 1))) 484 if (likely(region_in_sync(ms, region, 1)))
1005 m = choose_mirror(ms, bio->bi_sector); 485 m = choose_mirror(ms, bio->bi_sector);
1006 else if (m && atomic_read(&m->error_count)) 486 else if (m && atomic_read(&m->error_count))
1007 m = NULL; 487 m = NULL;
@@ -1024,57 +504,6 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
1024 * NOSYNC: increment pending, just write to the default mirror 504 * NOSYNC: increment pending, just write to the default mirror
1025 *---------------------------------------------------------------*/ 505 *---------------------------------------------------------------*/
1026 506
1027/* __bio_mark_nosync
1028 * @ms
1029 * @bio
1030 * @done
1031 * @error
1032 *
1033 * The bio was written on some mirror(s) but failed on other mirror(s).
1034 * We can successfully endio the bio but should avoid the region being
1035 * marked clean by setting the state RH_NOSYNC.
1036 *
1037 * This function is _not_ safe in interrupt context!
1038 */
1039static void __bio_mark_nosync(struct mirror_set *ms,
1040 struct bio *bio, unsigned done, int error)
1041{
1042 unsigned long flags;
1043 struct region_hash *rh = &ms->rh;
1044 struct dm_dirty_log *log = ms->rh.log;
1045 struct region *reg;
1046 region_t region = bio_to_region(rh, bio);
1047 int recovering = 0;
1048
1049 /* We must inform the log that the sync count has changed. */
1050 log->type->set_region_sync(log, region, 0);
1051 ms->in_sync = 0;
1052
1053 read_lock(&rh->hash_lock);
1054 reg = __rh_find(rh, region);
1055 read_unlock(&rh->hash_lock);
1056
1057 /* region hash entry should exist because write was in-flight */
1058 BUG_ON(!reg);
1059 BUG_ON(!list_empty(&reg->list));
1060
1061 spin_lock_irqsave(&rh->region_lock, flags);
1062 /*
1063 * Possible cases:
1064 * 1) RH_DIRTY
1065 * 2) RH_NOSYNC: was dirty, other preceeding writes failed
1066 * 3) RH_RECOVERING: flushing pending writes
1067 * Either case, the region should have not been connected to list.
1068 */
1069 recovering = (reg->state == RH_RECOVERING);
1070 reg->state = RH_NOSYNC;
1071 BUG_ON(!list_empty(&reg->list));
1072 spin_unlock_irqrestore(&rh->region_lock, flags);
1073
1074 bio_endio(bio, error);
1075 if (recovering)
1076 complete_resync_work(reg, 0);
1077}
1078 507
1079static void write_callback(unsigned long error, void *context) 508static void write_callback(unsigned long error, void *context)
1080{ 509{
@@ -1119,7 +548,7 @@ static void write_callback(unsigned long error, void *context)
1119 bio_list_add(&ms->failures, bio); 548 bio_list_add(&ms->failures, bio);
1120 spin_unlock_irqrestore(&ms->lock, flags); 549 spin_unlock_irqrestore(&ms->lock, flags);
1121 if (should_wake) 550 if (should_wake)
1122 wake(ms); 551 wakeup_mirrord(ms);
1123 return; 552 return;
1124 } 553 }
1125out: 554out:
@@ -1149,7 +578,7 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
1149 */ 578 */
1150 bio_set_m(bio, get_default_mirror(ms)); 579 bio_set_m(bio, get_default_mirror(ms));
1151 580
1152 (void) dm_io(&io_req, ms->nr_mirrors, io, NULL); 581 BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL));
1153} 582}
1154 583
1155static void do_writes(struct mirror_set *ms, struct bio_list *writes) 584static void do_writes(struct mirror_set *ms, struct bio_list *writes)
@@ -1169,18 +598,19 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
1169 bio_list_init(&recover); 598 bio_list_init(&recover);
1170 599
1171 while ((bio = bio_list_pop(writes))) { 600 while ((bio = bio_list_pop(writes))) {
1172 state = rh_state(&ms->rh, bio_to_region(&ms->rh, bio), 1); 601 state = dm_rh_get_state(ms->rh,
602 dm_rh_bio_to_region(ms->rh, bio), 1);
1173 switch (state) { 603 switch (state) {
1174 case RH_CLEAN: 604 case DM_RH_CLEAN:
1175 case RH_DIRTY: 605 case DM_RH_DIRTY:
1176 this_list = &sync; 606 this_list = &sync;
1177 break; 607 break;
1178 608
1179 case RH_NOSYNC: 609 case DM_RH_NOSYNC:
1180 this_list = &nosync; 610 this_list = &nosync;
1181 break; 611 break;
1182 612
1183 case RH_RECOVERING: 613 case DM_RH_RECOVERING:
1184 this_list = &recover; 614 this_list = &recover;
1185 break; 615 break;
1186 } 616 }
@@ -1193,9 +623,9 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
1193 * be written to (writes to recover regions are going to 623 * be written to (writes to recover regions are going to
1194 * be delayed). 624 * be delayed).
1195 */ 625 */
1196 rh_inc_pending(&ms->rh, &sync); 626 dm_rh_inc_pending(ms->rh, &sync);
1197 rh_inc_pending(&ms->rh, &nosync); 627 dm_rh_inc_pending(ms->rh, &nosync);
1198 ms->log_failure = rh_flush(&ms->rh) ? 1 : 0; 628 ms->log_failure = dm_rh_flush(ms->rh) ? 1 : 0;
1199 629
1200 /* 630 /*
1201 * Dispatch io. 631 * Dispatch io.
@@ -1204,13 +634,13 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
1204 spin_lock_irq(&ms->lock); 634 spin_lock_irq(&ms->lock);
1205 bio_list_merge(&ms->failures, &sync); 635 bio_list_merge(&ms->failures, &sync);
1206 spin_unlock_irq(&ms->lock); 636 spin_unlock_irq(&ms->lock);
1207 wake(ms); 637 wakeup_mirrord(ms);
1208 } else 638 } else
1209 while ((bio = bio_list_pop(&sync))) 639 while ((bio = bio_list_pop(&sync)))
1210 do_write(ms, bio); 640 do_write(ms, bio);
1211 641
1212 while ((bio = bio_list_pop(&recover))) 642 while ((bio = bio_list_pop(&recover)))
1213 rh_delay(&ms->rh, bio); 643 dm_rh_delay(ms->rh, bio);
1214 644
1215 while ((bio = bio_list_pop(&nosync))) { 645 while ((bio = bio_list_pop(&nosync))) {
1216 map_bio(get_default_mirror(ms), bio); 646 map_bio(get_default_mirror(ms), bio);
@@ -1227,7 +657,8 @@ static void do_failures(struct mirror_set *ms, struct bio_list *failures)
1227 657
1228 if (!ms->log_failure) { 658 if (!ms->log_failure) {
1229 while ((bio = bio_list_pop(failures))) 659 while ((bio = bio_list_pop(failures)))
1230 __bio_mark_nosync(ms, bio, bio->bi_size, 0); 660 ms->in_sync = 0;
661 dm_rh_mark_nosync(ms->rh, bio, bio->bi_size, 0);
1231 return; 662 return;
1232 } 663 }
1233 664
@@ -1280,8 +711,8 @@ static void trigger_event(struct work_struct *work)
1280 *---------------------------------------------------------------*/ 711 *---------------------------------------------------------------*/
1281static void do_mirror(struct work_struct *work) 712static void do_mirror(struct work_struct *work)
1282{ 713{
1283 struct mirror_set *ms =container_of(work, struct mirror_set, 714 struct mirror_set *ms = container_of(work, struct mirror_set,
1284 kmirrord_work); 715 kmirrord_work);
1285 struct bio_list reads, writes, failures; 716 struct bio_list reads, writes, failures;
1286 unsigned long flags; 717 unsigned long flags;
1287 718
@@ -1294,7 +725,7 @@ static void do_mirror(struct work_struct *work)
1294 bio_list_init(&ms->failures); 725 bio_list_init(&ms->failures);
1295 spin_unlock_irqrestore(&ms->lock, flags); 726 spin_unlock_irqrestore(&ms->lock, flags);
1296 727
1297 rh_update_states(&ms->rh); 728 dm_rh_update_states(ms->rh, errors_handled(ms));
1298 do_recovery(ms); 729 do_recovery(ms);
1299 do_reads(ms, &reads); 730 do_reads(ms, &reads);
1300 do_writes(ms, &writes); 731 do_writes(ms, &writes);
@@ -1303,7 +734,6 @@ static void do_mirror(struct work_struct *work)
1303 dm_table_unplug_all(ms->ti->table); 734 dm_table_unplug_all(ms->ti->table);
1304} 735}
1305 736
1306
1307/*----------------------------------------------------------------- 737/*-----------------------------------------------------------------
1308 * Target functions 738 * Target functions
1309 *---------------------------------------------------------------*/ 739 *---------------------------------------------------------------*/
@@ -1315,9 +745,6 @@ static struct mirror_set *alloc_context(unsigned int nr_mirrors,
1315 size_t len; 745 size_t len;
1316 struct mirror_set *ms = NULL; 746 struct mirror_set *ms = NULL;
1317 747
1318 if (array_too_big(sizeof(*ms), sizeof(ms->mirror[0]), nr_mirrors))
1319 return NULL;
1320
1321 len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors); 748 len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors);
1322 749
1323 ms = kzalloc(len, GFP_KERNEL); 750 ms = kzalloc(len, GFP_KERNEL);
@@ -1353,7 +780,11 @@ static struct mirror_set *alloc_context(unsigned int nr_mirrors,
1353 return NULL; 780 return NULL;
1354 } 781 }
1355 782
1356 if (rh_init(&ms->rh, ms, dl, region_size, ms->nr_regions)) { 783 ms->rh = dm_region_hash_create(ms, dispatch_bios, wakeup_mirrord,
784 wakeup_all_recovery_waiters,
785 ms->ti->begin, MAX_RECOVERY,
786 dl, region_size, ms->nr_regions);
787 if (IS_ERR(ms->rh)) {
1357 ti->error = "Error creating dirty region hash"; 788 ti->error = "Error creating dirty region hash";
1358 dm_io_client_destroy(ms->io_client); 789 dm_io_client_destroy(ms->io_client);
1359 mempool_destroy(ms->read_record_pool); 790 mempool_destroy(ms->read_record_pool);
@@ -1371,7 +802,7 @@ static void free_context(struct mirror_set *ms, struct dm_target *ti,
1371 dm_put_device(ti, ms->mirror[m].dev); 802 dm_put_device(ti, ms->mirror[m].dev);
1372 803
1373 dm_io_client_destroy(ms->io_client); 804 dm_io_client_destroy(ms->io_client);
1374 rh_exit(&ms->rh); 805 dm_region_hash_destroy(ms->rh);
1375 mempool_destroy(ms->read_record_pool); 806 mempool_destroy(ms->read_record_pool);
1376 kfree(ms); 807 kfree(ms);
1377} 808}
@@ -1411,10 +842,10 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
1411 * Create dirty log: log_type #log_params <log_params> 842 * Create dirty log: log_type #log_params <log_params>
1412 */ 843 */
1413static struct dm_dirty_log *create_dirty_log(struct dm_target *ti, 844static struct dm_dirty_log *create_dirty_log(struct dm_target *ti,
1414 unsigned int argc, char **argv, 845 unsigned argc, char **argv,
1415 unsigned int *args_used) 846 unsigned *args_used)
1416{ 847{
1417 unsigned int param_count; 848 unsigned param_count;
1418 struct dm_dirty_log *dl; 849 struct dm_dirty_log *dl;
1419 850
1420 if (argc < 2) { 851 if (argc < 2) {
@@ -1545,7 +976,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1545 } 976 }
1546 977
1547 ti->private = ms; 978 ti->private = ms;
1548 ti->split_io = ms->rh.region_size; 979 ti->split_io = dm_rh_get_region_size(ms->rh);
1549 980
1550 ms->kmirrord_wq = create_singlethread_workqueue("kmirrord"); 981 ms->kmirrord_wq = create_singlethread_workqueue("kmirrord");
1551 if (!ms->kmirrord_wq) { 982 if (!ms->kmirrord_wq) {
@@ -1580,11 +1011,11 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1580 goto err_destroy_wq; 1011 goto err_destroy_wq;
1581 } 1012 }
1582 1013
1583 r = dm_kcopyd_client_create(DM_IO_PAGES, &ms->kcopyd_client); 1014 r = dm_kcopyd_client_create(DM_KCOPYD_PAGES, &ms->kcopyd_client);
1584 if (r) 1015 if (r)
1585 goto err_destroy_wq; 1016 goto err_destroy_wq;
1586 1017
1587 wake(ms); 1018 wakeup_mirrord(ms);
1588 return 0; 1019 return 0;
1589 1020
1590err_destroy_wq: 1021err_destroy_wq:
@@ -1605,22 +1036,6 @@ static void mirror_dtr(struct dm_target *ti)
1605 free_context(ms, ti, ms->nr_mirrors); 1036 free_context(ms, ti, ms->nr_mirrors);
1606} 1037}
1607 1038
1608static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
1609{
1610 unsigned long flags;
1611 int should_wake = 0;
1612 struct bio_list *bl;
1613
1614 bl = (rw == WRITE) ? &ms->writes : &ms->reads;
1615 spin_lock_irqsave(&ms->lock, flags);
1616 should_wake = !(bl->head);
1617 bio_list_add(bl, bio);
1618 spin_unlock_irqrestore(&ms->lock, flags);
1619
1620 if (should_wake)
1621 wake(ms);
1622}
1623
1624/* 1039/*
1625 * Mirror mapping function 1040 * Mirror mapping function
1626 */ 1041 */
@@ -1631,16 +1046,16 @@ static int mirror_map(struct dm_target *ti, struct bio *bio,
1631 struct mirror *m; 1046 struct mirror *m;
1632 struct mirror_set *ms = ti->private; 1047 struct mirror_set *ms = ti->private;
1633 struct dm_raid1_read_record *read_record = NULL; 1048 struct dm_raid1_read_record *read_record = NULL;
1049 struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
1634 1050
1635 if (rw == WRITE) { 1051 if (rw == WRITE) {
1636 /* Save region for mirror_end_io() handler */ 1052 /* Save region for mirror_end_io() handler */
1637 map_context->ll = bio_to_region(&ms->rh, bio); 1053 map_context->ll = dm_rh_bio_to_region(ms->rh, bio);
1638 queue_bio(ms, bio, rw); 1054 queue_bio(ms, bio, rw);
1639 return DM_MAPIO_SUBMITTED; 1055 return DM_MAPIO_SUBMITTED;
1640 } 1056 }
1641 1057
1642 r = ms->rh.log->type->in_sync(ms->rh.log, 1058 r = log->type->in_sync(log, dm_rh_bio_to_region(ms->rh, bio), 0);
1643 bio_to_region(&ms->rh, bio), 0);
1644 if (r < 0 && r != -EWOULDBLOCK) 1059 if (r < 0 && r != -EWOULDBLOCK)
1645 return r; 1060 return r;
1646 1061
@@ -1688,7 +1103,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
1688 * We need to dec pending if this was a write. 1103 * We need to dec pending if this was a write.
1689 */ 1104 */
1690 if (rw == WRITE) { 1105 if (rw == WRITE) {
1691 rh_dec(&ms->rh, map_context->ll); 1106 dm_rh_dec(ms->rh, map_context->ll);
1692 return error; 1107 return error;
1693 } 1108 }
1694 1109
@@ -1744,7 +1159,7 @@ out:
1744static void mirror_presuspend(struct dm_target *ti) 1159static void mirror_presuspend(struct dm_target *ti)
1745{ 1160{
1746 struct mirror_set *ms = (struct mirror_set *) ti->private; 1161 struct mirror_set *ms = (struct mirror_set *) ti->private;
1747 struct dm_dirty_log *log = ms->rh.log; 1162 struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
1748 1163
1749 atomic_set(&ms->suspend, 1); 1164 atomic_set(&ms->suspend, 1);
1750 1165
@@ -1752,10 +1167,10 @@ static void mirror_presuspend(struct dm_target *ti)
1752 * We must finish up all the work that we've 1167 * We must finish up all the work that we've
1753 * generated (i.e. recovery work). 1168 * generated (i.e. recovery work).
1754 */ 1169 */
1755 rh_stop_recovery(&ms->rh); 1170 dm_rh_stop_recovery(ms->rh);
1756 1171
1757 wait_event(_kmirrord_recovery_stopped, 1172 wait_event(_kmirrord_recovery_stopped,
1758 !atomic_read(&ms->rh.recovery_in_flight)); 1173 !dm_rh_recovery_in_flight(ms->rh));
1759 1174
1760 if (log->type->presuspend && log->type->presuspend(log)) 1175 if (log->type->presuspend && log->type->presuspend(log))
1761 /* FIXME: need better error handling */ 1176 /* FIXME: need better error handling */
@@ -1773,7 +1188,7 @@ static void mirror_presuspend(struct dm_target *ti)
1773static void mirror_postsuspend(struct dm_target *ti) 1188static void mirror_postsuspend(struct dm_target *ti)
1774{ 1189{
1775 struct mirror_set *ms = ti->private; 1190 struct mirror_set *ms = ti->private;
1776 struct dm_dirty_log *log = ms->rh.log; 1191 struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
1777 1192
1778 if (log->type->postsuspend && log->type->postsuspend(log)) 1193 if (log->type->postsuspend && log->type->postsuspend(log))
1779 /* FIXME: need better error handling */ 1194 /* FIXME: need better error handling */
@@ -1783,13 +1198,13 @@ static void mirror_postsuspend(struct dm_target *ti)
1783static void mirror_resume(struct dm_target *ti) 1198static void mirror_resume(struct dm_target *ti)
1784{ 1199{
1785 struct mirror_set *ms = ti->private; 1200 struct mirror_set *ms = ti->private;
1786 struct dm_dirty_log *log = ms->rh.log; 1201 struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
1787 1202
1788 atomic_set(&ms->suspend, 0); 1203 atomic_set(&ms->suspend, 0);
1789 if (log->type->resume && log->type->resume(log)) 1204 if (log->type->resume && log->type->resume(log))
1790 /* FIXME: need better error handling */ 1205 /* FIXME: need better error handling */
1791 DMWARN("log resume failed"); 1206 DMWARN("log resume failed");
1792 rh_start_recovery(&ms->rh); 1207 dm_rh_start_recovery(ms->rh);
1793} 1208}
1794 1209
1795/* 1210/*
@@ -1821,7 +1236,7 @@ static int mirror_status(struct dm_target *ti, status_type_t type,
1821{ 1236{
1822 unsigned int m, sz = 0; 1237 unsigned int m, sz = 0;
1823 struct mirror_set *ms = (struct mirror_set *) ti->private; 1238 struct mirror_set *ms = (struct mirror_set *) ti->private;
1824 struct dm_dirty_log *log = ms->rh.log; 1239 struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
1825 char buffer[ms->nr_mirrors + 1]; 1240 char buffer[ms->nr_mirrors + 1];
1826 1241
1827 switch (type) { 1242 switch (type) {
@@ -1834,15 +1249,15 @@ static int mirror_status(struct dm_target *ti, status_type_t type,
1834 buffer[m] = '\0'; 1249 buffer[m] = '\0';
1835 1250
1836 DMEMIT("%llu/%llu 1 %s ", 1251 DMEMIT("%llu/%llu 1 %s ",
1837 (unsigned long long)log->type->get_sync_count(ms->rh.log), 1252 (unsigned long long)log->type->get_sync_count(log),
1838 (unsigned long long)ms->nr_regions, buffer); 1253 (unsigned long long)ms->nr_regions, buffer);
1839 1254
1840 sz += log->type->status(ms->rh.log, type, result+sz, maxlen-sz); 1255 sz += log->type->status(log, type, result+sz, maxlen-sz);
1841 1256
1842 break; 1257 break;
1843 1258
1844 case STATUSTYPE_TABLE: 1259 case STATUSTYPE_TABLE:
1845 sz = log->type->status(ms->rh.log, type, result, maxlen); 1260 sz = log->type->status(log, type, result, maxlen);
1846 1261
1847 DMEMIT("%d", ms->nr_mirrors); 1262 DMEMIT("%d", ms->nr_mirrors);
1848 for (m = 0; m < ms->nr_mirrors; m++) 1263 for (m = 0; m < ms->nr_mirrors; m++)
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
new file mode 100644
index 000000000000..59f8d9df9e1a
--- /dev/null
+++ b/drivers/md/dm-region-hash.c
@@ -0,0 +1,704 @@
1/*
2 * Copyright (C) 2003 Sistina Software Limited.
3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
4 *
5 * This file is released under the GPL.
6 */
7
8#include <linux/dm-dirty-log.h>
9#include <linux/dm-region-hash.h>
10
11#include <linux/ctype.h>
12#include <linux/init.h>
13#include <linux/module.h>
14#include <linux/vmalloc.h>
15
16#include "dm.h"
17#include "dm-bio-list.h"
18
19#define DM_MSG_PREFIX "region hash"
20
21/*-----------------------------------------------------------------
22 * Region hash
23 *
24 * The mirror splits itself up into discrete regions. Each
25 * region can be in one of three states: clean, dirty,
26 * nosync. There is no need to put clean regions in the hash.
27 *
28 * In addition to being present in the hash table a region _may_
29 * be present on one of three lists.
30 *
31 * clean_regions: Regions on this list have no io pending to
32 * them, they are in sync, we are no longer interested in them,
33 * they are dull. dm_rh_update_states() will remove them from the
34 * hash table.
35 *
36 * quiesced_regions: These regions have been spun down, ready
37 * for recovery. rh_recovery_start() will remove regions from
38 * this list and hand them to kmirrord, which will schedule the
39 * recovery io with kcopyd.
40 *
41 * recovered_regions: Regions that kcopyd has successfully
42 * recovered. dm_rh_update_states() will now schedule any delayed
43 * io, up the recovery_count, and remove the region from the
44 * hash.
45 *
46 * There are 2 locks:
47 * A rw spin lock 'hash_lock' protects just the hash table,
48 * this is never held in write mode from interrupt context,
49 * which I believe means that we only have to disable irqs when
50 * doing a write lock.
51 *
52 * An ordinary spin lock 'region_lock' that protects the three
53 * lists in the region_hash, with the 'state', 'list' and
54 * 'delayed_bios' fields of the regions. This is used from irq
55 * context, so all other uses will have to suspend local irqs.
56 *---------------------------------------------------------------*/
57struct dm_region_hash {
58 uint32_t region_size;
59 unsigned region_shift;
60
61 /* holds persistent region state */
62 struct dm_dirty_log *log;
63
64 /* hash table */
65 rwlock_t hash_lock;
66 mempool_t *region_pool;
67 unsigned mask;
68 unsigned nr_buckets;
69 unsigned prime;
70 unsigned shift;
71 struct list_head *buckets;
72
73 unsigned max_recovery; /* Max # of regions to recover in parallel */
74
75 spinlock_t region_lock;
76 atomic_t recovery_in_flight;
77 struct semaphore recovery_count;
78 struct list_head clean_regions;
79 struct list_head quiesced_regions;
80 struct list_head recovered_regions;
81 struct list_head failed_recovered_regions;
82
83 void *context;
84 sector_t target_begin;
85
86 /* Callback function to schedule bios writes */
87 void (*dispatch_bios)(void *context, struct bio_list *bios);
88
89 /* Callback function to wakeup callers worker thread. */
90 void (*wakeup_workers)(void *context);
91
92 /* Callback function to wakeup callers recovery waiters. */
93 void (*wakeup_all_recovery_waiters)(void *context);
94};
95
96struct dm_region {
97 struct dm_region_hash *rh; /* FIXME: can we get rid of this ? */
98 region_t key;
99 int state;
100
101 struct list_head hash_list;
102 struct list_head list;
103
104 atomic_t pending;
105 struct bio_list delayed_bios;
106};
107
108/*
109 * Conversion fns
110 */
111static region_t dm_rh_sector_to_region(struct dm_region_hash *rh, sector_t sector)
112{
113 return sector >> rh->region_shift;
114}
115
116sector_t dm_rh_region_to_sector(struct dm_region_hash *rh, region_t region)
117{
118 return region << rh->region_shift;
119}
120EXPORT_SYMBOL_GPL(dm_rh_region_to_sector);
121
122region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio)
123{
124 return dm_rh_sector_to_region(rh, bio->bi_sector - rh->target_begin);
125}
126EXPORT_SYMBOL_GPL(dm_rh_bio_to_region);
127
128void *dm_rh_region_context(struct dm_region *reg)
129{
130 return reg->rh->context;
131}
132EXPORT_SYMBOL_GPL(dm_rh_region_context);
133
134region_t dm_rh_get_region_key(struct dm_region *reg)
135{
136 return reg->key;
137}
138EXPORT_SYMBOL_GPL(dm_rh_get_region_key);
139
140sector_t dm_rh_get_region_size(struct dm_region_hash *rh)
141{
142 return rh->region_size;
143}
144EXPORT_SYMBOL_GPL(dm_rh_get_region_size);
145
146/*
147 * FIXME: shall we pass in a structure instead of all these args to
148 * dm_region_hash_create()????
149 */
150#define RH_HASH_MULT 2654435387U
151#define RH_HASH_SHIFT 12
152
153#define MIN_REGIONS 64
154struct dm_region_hash *dm_region_hash_create(
155 void *context, void (*dispatch_bios)(void *context,
156 struct bio_list *bios),
157 void (*wakeup_workers)(void *context),
158 void (*wakeup_all_recovery_waiters)(void *context),
159 sector_t target_begin, unsigned max_recovery,
160 struct dm_dirty_log *log, uint32_t region_size,
161 region_t nr_regions)
162{
163 struct dm_region_hash *rh;
164 unsigned nr_buckets, max_buckets;
165 size_t i;
166
167 /*
168 * Calculate a suitable number of buckets for our hash
169 * table.
170 */
171 max_buckets = nr_regions >> 6;
172 for (nr_buckets = 128u; nr_buckets < max_buckets; nr_buckets <<= 1)
173 ;
174 nr_buckets >>= 1;
175
176 rh = kmalloc(sizeof(*rh), GFP_KERNEL);
177 if (!rh) {
178 DMERR("unable to allocate region hash memory");
179 return ERR_PTR(-ENOMEM);
180 }
181
182 rh->context = context;
183 rh->dispatch_bios = dispatch_bios;
184 rh->wakeup_workers = wakeup_workers;
185 rh->wakeup_all_recovery_waiters = wakeup_all_recovery_waiters;
186 rh->target_begin = target_begin;
187 rh->max_recovery = max_recovery;
188 rh->log = log;
189 rh->region_size = region_size;
190 rh->region_shift = ffs(region_size) - 1;
191 rwlock_init(&rh->hash_lock);
192 rh->mask = nr_buckets - 1;
193 rh->nr_buckets = nr_buckets;
194
195 rh->shift = RH_HASH_SHIFT;
196 rh->prime = RH_HASH_MULT;
197
198 rh->buckets = vmalloc(nr_buckets * sizeof(*rh->buckets));
199 if (!rh->buckets) {
200 DMERR("unable to allocate region hash bucket memory");
201 kfree(rh);
202 return ERR_PTR(-ENOMEM);
203 }
204
205 for (i = 0; i < nr_buckets; i++)
206 INIT_LIST_HEAD(rh->buckets + i);
207
208 spin_lock_init(&rh->region_lock);
209 sema_init(&rh->recovery_count, 0);
210 atomic_set(&rh->recovery_in_flight, 0);
211 INIT_LIST_HEAD(&rh->clean_regions);
212 INIT_LIST_HEAD(&rh->quiesced_regions);
213 INIT_LIST_HEAD(&rh->recovered_regions);
214 INIT_LIST_HEAD(&rh->failed_recovered_regions);
215
216 rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS,
217 sizeof(struct dm_region));
218 if (!rh->region_pool) {
219 vfree(rh->buckets);
220 kfree(rh);
221 rh = ERR_PTR(-ENOMEM);
222 }
223
224 return rh;
225}
226EXPORT_SYMBOL_GPL(dm_region_hash_create);
227
228void dm_region_hash_destroy(struct dm_region_hash *rh)
229{
230 unsigned h;
231 struct dm_region *reg, *nreg;
232
233 BUG_ON(!list_empty(&rh->quiesced_regions));
234 for (h = 0; h < rh->nr_buckets; h++) {
235 list_for_each_entry_safe(reg, nreg, rh->buckets + h,
236 hash_list) {
237 BUG_ON(atomic_read(&reg->pending));
238 mempool_free(reg, rh->region_pool);
239 }
240 }
241
242 if (rh->log)
243 dm_dirty_log_destroy(rh->log);
244
245 if (rh->region_pool)
246 mempool_destroy(rh->region_pool);
247
248 vfree(rh->buckets);
249 kfree(rh);
250}
251EXPORT_SYMBOL_GPL(dm_region_hash_destroy);
252
253struct dm_dirty_log *dm_rh_dirty_log(struct dm_region_hash *rh)
254{
255 return rh->log;
256}
257EXPORT_SYMBOL_GPL(dm_rh_dirty_log);
258
259static unsigned rh_hash(struct dm_region_hash *rh, region_t region)
260{
261 return (unsigned) ((region * rh->prime) >> rh->shift) & rh->mask;
262}
263
264static struct dm_region *__rh_lookup(struct dm_region_hash *rh, region_t region)
265{
266 struct dm_region *reg;
267 struct list_head *bucket = rh->buckets + rh_hash(rh, region);
268
269 list_for_each_entry(reg, bucket, hash_list)
270 if (reg->key == region)
271 return reg;
272
273 return NULL;
274}
275
276static void __rh_insert(struct dm_region_hash *rh, struct dm_region *reg)
277{
278 list_add(&reg->hash_list, rh->buckets + rh_hash(rh, reg->key));
279}
280
281static struct dm_region *__rh_alloc(struct dm_region_hash *rh, region_t region)
282{
283 struct dm_region *reg, *nreg;
284
285 nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC);
286 if (unlikely(!nreg))
287 nreg = kmalloc(sizeof(*nreg), GFP_NOIO);
288
289 nreg->state = rh->log->type->in_sync(rh->log, region, 1) ?
290 DM_RH_CLEAN : DM_RH_NOSYNC;
291 nreg->rh = rh;
292 nreg->key = region;
293 INIT_LIST_HEAD(&nreg->list);
294 atomic_set(&nreg->pending, 0);
295 bio_list_init(&nreg->delayed_bios);
296
297 write_lock_irq(&rh->hash_lock);
298 reg = __rh_lookup(rh, region);
299 if (reg)
300 /* We lost the race. */
301 mempool_free(nreg, rh->region_pool);
302 else {
303 __rh_insert(rh, nreg);
304 if (nreg->state == DM_RH_CLEAN) {
305 spin_lock(&rh->region_lock);
306 list_add(&nreg->list, &rh->clean_regions);
307 spin_unlock(&rh->region_lock);
308 }
309
310 reg = nreg;
311 }
312 write_unlock_irq(&rh->hash_lock);
313
314 return reg;
315}
316
317static struct dm_region *__rh_find(struct dm_region_hash *rh, region_t region)
318{
319 struct dm_region *reg;
320
321 reg = __rh_lookup(rh, region);
322 if (!reg) {
323 read_unlock(&rh->hash_lock);
324 reg = __rh_alloc(rh, region);
325 read_lock(&rh->hash_lock);
326 }
327
328 return reg;
329}
330
331int dm_rh_get_state(struct dm_region_hash *rh, region_t region, int may_block)
332{
333 int r;
334 struct dm_region *reg;
335
336 read_lock(&rh->hash_lock);
337 reg = __rh_lookup(rh, region);
338 read_unlock(&rh->hash_lock);
339
340 if (reg)
341 return reg->state;
342
343 /*
344 * The region wasn't in the hash, so we fall back to the
345 * dirty log.
346 */
347 r = rh->log->type->in_sync(rh->log, region, may_block);
348
349 /*
350 * Any error from the dirty log (eg. -EWOULDBLOCK) gets
351 * taken as a DM_RH_NOSYNC
352 */
353 return r == 1 ? DM_RH_CLEAN : DM_RH_NOSYNC;
354}
355EXPORT_SYMBOL_GPL(dm_rh_get_state);
356
357static void complete_resync_work(struct dm_region *reg, int success)
358{
359 struct dm_region_hash *rh = reg->rh;
360
361 rh->log->type->set_region_sync(rh->log, reg->key, success);
362
363 /*
364 * Dispatch the bios before we call 'wake_up_all'.
365 * This is important because if we are suspending,
366 * we want to know that recovery is complete and
367 * the work queue is flushed. If we wake_up_all
368 * before we dispatch_bios (queue bios and call wake()),
369 * then we risk suspending before the work queue
370 * has been properly flushed.
371 */
372 rh->dispatch_bios(rh->context, &reg->delayed_bios);
373 if (atomic_dec_and_test(&rh->recovery_in_flight))
374 rh->wakeup_all_recovery_waiters(rh->context);
375 up(&rh->recovery_count);
376}
377
/* dm_rh_mark_nosync
 * @rh: region hash
 * @bio: the bio that was partially written
 * @done: unused here
 * @error: completion status passed through to bio_endio()
 *
 * The bio was written on some mirror(s) but failed on other mirror(s).
 * We can successfully endio the bio but should avoid the region being
 * marked clean by setting the state DM_RH_NOSYNC.
 *
 * This function is _not_ safe in interrupt context!
 */
void dm_rh_mark_nosync(struct dm_region_hash *rh,
		       struct bio *bio, unsigned done, int error)
{
	unsigned long flags;
	struct dm_dirty_log *log = rh->log;
	struct dm_region *reg;
	region_t region = dm_rh_bio_to_region(rh, bio);
	int recovering = 0;

	/* We must inform the log that the sync count has changed. */
	log->type->set_region_sync(log, region, 0);

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);
	read_unlock(&rh->hash_lock);

	/* region hash entry should exist because write was in-flight */
	BUG_ON(!reg);
	BUG_ON(!list_empty(&reg->list));

	spin_lock_irqsave(&rh->region_lock, flags);
	/*
	 * Possible cases:
	 *   1) DM_RH_DIRTY
	 *   2) DM_RH_NOSYNC: was dirty, other preceding writes failed
	 *   3) DM_RH_RECOVERING: flushing pending writes
	 * Either case, the region should have not been connected to list.
	 */
	recovering = (reg->state == DM_RH_RECOVERING);
	reg->state = DM_RH_NOSYNC;
	BUG_ON(!list_empty(&reg->list));
	spin_unlock_irqrestore(&rh->region_lock, flags);

	/* Complete the write for the caller; the region stays not-in-sync. */
	bio_endio(bio, error);
	if (recovering)
		complete_resync_work(reg, 0);
}
EXPORT_SYMBOL_GPL(dm_rh_mark_nosync);
428
429void dm_rh_update_states(struct dm_region_hash *rh, int errors_handled)
430{
431 struct dm_region *reg, *next;
432
433 LIST_HEAD(clean);
434 LIST_HEAD(recovered);
435 LIST_HEAD(failed_recovered);
436
437 /*
438 * Quickly grab the lists.
439 */
440 write_lock_irq(&rh->hash_lock);
441 spin_lock(&rh->region_lock);
442 if (!list_empty(&rh->clean_regions)) {
443 list_splice_init(&rh->clean_regions, &clean);
444
445 list_for_each_entry(reg, &clean, list)
446 list_del(&reg->hash_list);
447 }
448
449 if (!list_empty(&rh->recovered_regions)) {
450 list_splice_init(&rh->recovered_regions, &recovered);
451
452 list_for_each_entry(reg, &recovered, list)
453 list_del(&reg->hash_list);
454 }
455
456 if (!list_empty(&rh->failed_recovered_regions)) {
457 list_splice_init(&rh->failed_recovered_regions,
458 &failed_recovered);
459
460 list_for_each_entry(reg, &failed_recovered, list)
461 list_del(&reg->hash_list);
462 }
463
464 spin_unlock(&rh->region_lock);
465 write_unlock_irq(&rh->hash_lock);
466
467 /*
468 * All the regions on the recovered and clean lists have
469 * now been pulled out of the system, so no need to do
470 * any more locking.
471 */
472 list_for_each_entry_safe(reg, next, &recovered, list) {
473 rh->log->type->clear_region(rh->log, reg->key);
474 complete_resync_work(reg, 1);
475 mempool_free(reg, rh->region_pool);
476 }
477
478 list_for_each_entry_safe(reg, next, &failed_recovered, list) {
479 complete_resync_work(reg, errors_handled ? 0 : 1);
480 mempool_free(reg, rh->region_pool);
481 }
482
483 list_for_each_entry_safe(reg, next, &clean, list) {
484 rh->log->type->clear_region(rh->log, reg->key);
485 mempool_free(reg, rh->region_pool);
486 }
487
488 rh->log->type->flush(rh->log);
489}
490EXPORT_SYMBOL_GPL(dm_rh_update_states);
491
492static void rh_inc(struct dm_region_hash *rh, region_t region)
493{
494 struct dm_region *reg;
495
496 read_lock(&rh->hash_lock);
497 reg = __rh_find(rh, region);
498
499 spin_lock_irq(&rh->region_lock);
500 atomic_inc(&reg->pending);
501
502 if (reg->state == DM_RH_CLEAN) {
503 reg->state = DM_RH_DIRTY;
504 list_del_init(&reg->list); /* take off the clean list */
505 spin_unlock_irq(&rh->region_lock);
506
507 rh->log->type->mark_region(rh->log, reg->key);
508 } else
509 spin_unlock_irq(&rh->region_lock);
510
511
512 read_unlock(&rh->hash_lock);
513}
514
515void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios)
516{
517 struct bio *bio;
518
519 for (bio = bios->head; bio; bio = bio->bi_next)
520 rh_inc(rh, dm_rh_bio_to_region(rh, bio));
521}
522EXPORT_SYMBOL_GPL(dm_rh_inc_pending);
523
/*
 * Drop one pending write from @region.  When the last pending write
 * completes, the region is moved to the list matching its state and the
 * workers are woken:
 *   DM_RH_RECOVERING -> quiesced_regions (recovery may now run)
 *   DM_RH_DIRTY      -> becomes DM_RH_CLEAN, put on clean_regions
 *   DM_RH_NOSYNC     -> kept off all lists (see comment below)
 *
 * NOTE(review): the __rh_lookup() result is used unchecked — the region
 * is assumed to be in the hash because a write was in flight for it.
 */
void dm_rh_dec(struct dm_region_hash *rh, region_t region)
{
	unsigned long flags;
	struct dm_region *reg;
	int should_wake = 0;

	read_lock(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	read_unlock(&rh->hash_lock);

	spin_lock_irqsave(&rh->region_lock, flags);
	if (atomic_dec_and_test(&reg->pending)) {
		/*
		 * There is no pending I/O for this region.
		 * We can move the region to corresponding list for next action.
		 * At this point, the region is not yet connected to any list.
		 *
		 * If the state is DM_RH_NOSYNC, the region should be kept off
		 * from clean list.
		 * The hash entry for DM_RH_NOSYNC will remain in memory
		 * until the region is recovered or the map is reloaded.
		 */

		/* do nothing for DM_RH_NOSYNC */
		if (reg->state == DM_RH_RECOVERING) {
			list_add_tail(&reg->list, &rh->quiesced_regions);
		} else if (reg->state == DM_RH_DIRTY) {
			reg->state = DM_RH_CLEAN;
			list_add(&reg->list, &rh->clean_regions);
		}
		should_wake = 1;
	}
	spin_unlock_irqrestore(&rh->region_lock, flags);

	/* Wake outside the spinlock. */
	if (should_wake)
		rh->wakeup_workers(rh->context);
}
EXPORT_SYMBOL_GPL(dm_rh_dec);
562
/*
 * Starts quiescing a region in preparation for recovery.
 *
 * Returns the value of the dirty log's get_resync_work() when that is
 * <= 0 (no work / error), otherwise 1 after marking the chosen region
 * DM_RH_RECOVERING.
 */
static int __rh_recovery_prepare(struct dm_region_hash *rh)
{
	int r;
	region_t region;
	struct dm_region *reg;

	/*
	 * Ask the dirty log what's next.
	 */
	r = rh->log->type->get_resync_work(rh->log, &region);
	if (r <= 0)
		return r;

	/*
	 * Get this region, and start it quiescing by setting the
	 * recovering flag.
	 */
	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);
	read_unlock(&rh->hash_lock);

	spin_lock_irq(&rh->region_lock);
	reg->state = DM_RH_RECOVERING;

	/* Already quiesced ? */
	if (atomic_read(&reg->pending))
		/* writes still pending: dm_rh_dec() will queue it later */
		list_del_init(&reg->list);
	else
		list_move(&reg->list, &rh->quiesced_regions);

	spin_unlock_irq(&rh->region_lock);

	return 1;
}
600
/*
 * Claim as many recovery slots as down_trylock() allows and prepare one
 * region for recovery per slot, stopping when the dirty log has no more
 * resync work.  recovery_in_flight counts prepared-but-unfinished
 * recoveries; the extra reference taken here keeps it non-zero for the
 * duration of the loop.
 */
void dm_rh_recovery_prepare(struct dm_region_hash *rh)
{
	/* Extra reference to avoid race with dm_rh_stop_recovery */
	atomic_inc(&rh->recovery_in_flight);

	while (!down_trylock(&rh->recovery_count)) {
		atomic_inc(&rh->recovery_in_flight);
		if (__rh_recovery_prepare(rh) <= 0) {
			/* No work was prepared: undo the count and slot. */
			atomic_dec(&rh->recovery_in_flight);
			up(&rh->recovery_count);
			break;
		}
	}

	/* Drop the extra reference */
	if (atomic_dec_and_test(&rh->recovery_in_flight))
		rh->wakeup_all_recovery_waiters(rh->context);
}
EXPORT_SYMBOL_GPL(dm_rh_recovery_prepare);
620
621/*
622 * Returns any quiesced regions.
623 */
624struct dm_region *dm_rh_recovery_start(struct dm_region_hash *rh)
625{
626 struct dm_region *reg = NULL;
627
628 spin_lock_irq(&rh->region_lock);
629 if (!list_empty(&rh->quiesced_regions)) {
630 reg = list_entry(rh->quiesced_regions.next,
631 struct dm_region, list);
632 list_del_init(&reg->list); /* remove from the quiesced list */
633 }
634 spin_unlock_irq(&rh->region_lock);
635
636 return reg;
637}
638EXPORT_SYMBOL_GPL(dm_rh_recovery_start);
639
640void dm_rh_recovery_end(struct dm_region *reg, int success)
641{
642 struct dm_region_hash *rh = reg->rh;
643
644 spin_lock_irq(&rh->region_lock);
645 if (success)
646 list_add(&reg->list, &reg->rh->recovered_regions);
647 else {
648 reg->state = DM_RH_NOSYNC;
649 list_add(&reg->list, &reg->rh->failed_recovered_regions);
650 }
651 spin_unlock_irq(&rh->region_lock);
652
653 rh->wakeup_workers(rh->context);
654}
655EXPORT_SYMBOL_GPL(dm_rh_recovery_end);
656
/* Return recovery in flight count. */
int dm_rh_recovery_in_flight(struct dm_region_hash *rh)
{
	return atomic_read(&rh->recovery_in_flight);
}
EXPORT_SYMBOL_GPL(dm_rh_recovery_in_flight);
663
/* Flush the dirty log; returns the log type's flush() result. */
int dm_rh_flush(struct dm_region_hash *rh)
{
	return rh->log->type->flush(rh->log);
}
EXPORT_SYMBOL_GPL(dm_rh_flush);
669
670void dm_rh_delay(struct dm_region_hash *rh, struct bio *bio)
671{
672 struct dm_region *reg;
673
674 read_lock(&rh->hash_lock);
675 reg = __rh_find(rh, dm_rh_bio_to_region(rh, bio));
676 bio_list_add(&reg->delayed_bios, bio);
677 read_unlock(&rh->hash_lock);
678}
679EXPORT_SYMBOL_GPL(dm_rh_delay);
680
681void dm_rh_stop_recovery(struct dm_region_hash *rh)
682{
683 int i;
684
685 /* wait for any recovering regions */
686 for (i = 0; i < rh->max_recovery; i++)
687 down(&rh->recovery_count);
688}
689EXPORT_SYMBOL_GPL(dm_rh_stop_recovery);
690
691void dm_rh_start_recovery(struct dm_region_hash *rh)
692{
693 int i;
694
695 for (i = 0; i < rh->max_recovery; i++)
696 up(&rh->recovery_count);
697
698 rh->wakeup_workers(rh->context);
699}
700EXPORT_SYMBOL_GPL(dm_rh_start_recovery);
701
702MODULE_DESCRIPTION(DM_NAME " region hash");
703MODULE_AUTHOR("Joe Thornber/Heinz Mauelshagen <dm-devel@redhat.com>");
704MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-round-robin.c b/drivers/md/dm-round-robin.c
index 391dfa2ad434..cdfbf65b28cb 100644
--- a/drivers/md/dm-round-robin.c
+++ b/drivers/md/dm-round-robin.c
@@ -9,7 +9,8 @@
9 * Round-robin path selector. 9 * Round-robin path selector.
10 */ 10 */
11 11
12#include "dm.h" 12#include <linux/device-mapper.h>
13
13#include "dm-path-selector.h" 14#include "dm-path-selector.h"
14 15
15#include <linux/slab.h> 16#include <linux/slab.h>
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 6e5528aecc98..b2d9d1ac28ad 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -600,7 +600,6 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
600 600
601 s->valid = 1; 601 s->valid = 1;
602 s->active = 0; 602 s->active = 0;
603 s->last_percent = 0;
604 init_rwsem(&s->lock); 603 init_rwsem(&s->lock);
605 spin_lock_init(&s->pe_lock); 604 spin_lock_init(&s->pe_lock);
606 s->ti = ti; 605 s->ti = ti;
@@ -824,8 +823,10 @@ static struct bio *put_pending_exception(struct dm_snap_pending_exception *pe)
824 * the bios for the original write to the origin. 823 * the bios for the original write to the origin.
825 */ 824 */
826 if (primary_pe && 825 if (primary_pe &&
827 atomic_dec_and_test(&primary_pe->ref_count)) 826 atomic_dec_and_test(&primary_pe->ref_count)) {
828 origin_bios = bio_list_get(&primary_pe->origin_bios); 827 origin_bios = bio_list_get(&primary_pe->origin_bios);
828 free_pending_exception(primary_pe);
829 }
829 830
830 /* 831 /*
831 * Free the pe if it's not linked to an origin write or if 832 * Free the pe if it's not linked to an origin write or if
@@ -834,12 +835,6 @@ static struct bio *put_pending_exception(struct dm_snap_pending_exception *pe)
834 if (!primary_pe || primary_pe != pe) 835 if (!primary_pe || primary_pe != pe)
835 free_pending_exception(pe); 836 free_pending_exception(pe);
836 837
837 /*
838 * Free the primary pe if nothing references it.
839 */
840 if (primary_pe && !atomic_read(&primary_pe->ref_count))
841 free_pending_exception(primary_pe);
842
843 return origin_bios; 838 return origin_bios;
844} 839}
845 840
diff --git a/drivers/md/dm-snap.h b/drivers/md/dm-snap.h
index 292c15609ae3..f07315fe2362 100644
--- a/drivers/md/dm-snap.h
+++ b/drivers/md/dm-snap.h
@@ -9,7 +9,7 @@
9#ifndef DM_SNAPSHOT_H 9#ifndef DM_SNAPSHOT_H
10#define DM_SNAPSHOT_H 10#define DM_SNAPSHOT_H
11 11
12#include "dm.h" 12#include <linux/device-mapper.h>
13#include "dm-bio-list.h" 13#include "dm-bio-list.h"
14#include <linux/blkdev.h> 14#include <linux/blkdev.h>
15#include <linux/workqueue.h> 15#include <linux/workqueue.h>
@@ -158,9 +158,6 @@ struct dm_snapshot {
158 /* Used for display of table */ 158 /* Used for display of table */
159 char type; 159 char type;
160 160
161 /* The last percentage we notified */
162 int last_percent;
163
164 mempool_t *pending_pool; 161 mempool_t *pending_pool;
165 162
166 struct exception_table pending; 163 struct exception_table pending;
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index b745d8ac625b..a2d068dbe9e2 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -4,7 +4,7 @@
4 * This file is released under the GPL. 4 * This file is released under the GPL.
5 */ 5 */
6 6
7#include "dm.h" 7#include <linux/device-mapper.h>
8 8
9#include <linux/module.h> 9#include <linux/module.h>
10#include <linux/init.h> 10#include <linux/init.h>
@@ -60,8 +60,8 @@ static inline struct stripe_c *alloc_context(unsigned int stripes)
60{ 60{
61 size_t len; 61 size_t len;
62 62
63 if (array_too_big(sizeof(struct stripe_c), sizeof(struct stripe), 63 if (dm_array_too_big(sizeof(struct stripe_c), sizeof(struct stripe),
64 stripes)) 64 stripes))
65 return NULL; 65 return NULL;
66 66
67 len = sizeof(struct stripe_c) + (sizeof(struct stripe) * stripes); 67 len = sizeof(struct stripe_c) + (sizeof(struct stripe) * stripes);
diff --git a/drivers/md/dm-zero.c b/drivers/md/dm-zero.c
index bdec206c404b..cdbf126ec106 100644
--- a/drivers/md/dm-zero.c
+++ b/drivers/md/dm-zero.c
@@ -4,7 +4,7 @@
4 * This file is released under the GPL. 4 * This file is released under the GPL.
5 */ 5 */
6 6
7#include "dm.h" 7#include <linux/device-mapper.h>
8 8
9#include <linux/module.h> 9#include <linux/module.h>
10#include <linux/init.h> 10#include <linux/init.h>
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 327de03a5bdf..d1d0cd0f5750 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -76,7 +76,6 @@ union map_info *dm_get_mapinfo(struct bio *bio)
76 */ 76 */
77struct dm_wq_req { 77struct dm_wq_req {
78 enum { 78 enum {
79 DM_WQ_FLUSH_ALL,
80 DM_WQ_FLUSH_DEFERRED, 79 DM_WQ_FLUSH_DEFERRED,
81 } type; 80 } type;
82 struct work_struct work; 81 struct work_struct work;
@@ -151,40 +150,40 @@ static struct kmem_cache *_tio_cache;
151 150
152static int __init local_init(void) 151static int __init local_init(void)
153{ 152{
154 int r; 153 int r = -ENOMEM;
155 154
156 /* allocate a slab for the dm_ios */ 155 /* allocate a slab for the dm_ios */
157 _io_cache = KMEM_CACHE(dm_io, 0); 156 _io_cache = KMEM_CACHE(dm_io, 0);
158 if (!_io_cache) 157 if (!_io_cache)
159 return -ENOMEM; 158 return r;
160 159
161 /* allocate a slab for the target ios */ 160 /* allocate a slab for the target ios */
162 _tio_cache = KMEM_CACHE(dm_target_io, 0); 161 _tio_cache = KMEM_CACHE(dm_target_io, 0);
163 if (!_tio_cache) { 162 if (!_tio_cache)
164 kmem_cache_destroy(_io_cache); 163 goto out_free_io_cache;
165 return -ENOMEM;
166 }
167 164
168 r = dm_uevent_init(); 165 r = dm_uevent_init();
169 if (r) { 166 if (r)
170 kmem_cache_destroy(_tio_cache); 167 goto out_free_tio_cache;
171 kmem_cache_destroy(_io_cache);
172 return r;
173 }
174 168
175 _major = major; 169 _major = major;
176 r = register_blkdev(_major, _name); 170 r = register_blkdev(_major, _name);
177 if (r < 0) { 171 if (r < 0)
178 kmem_cache_destroy(_tio_cache); 172 goto out_uevent_exit;
179 kmem_cache_destroy(_io_cache);
180 dm_uevent_exit();
181 return r;
182 }
183 173
184 if (!_major) 174 if (!_major)
185 _major = r; 175 _major = r;
186 176
187 return 0; 177 return 0;
178
179out_uevent_exit:
180 dm_uevent_exit();
181out_free_tio_cache:
182 kmem_cache_destroy(_tio_cache);
183out_free_io_cache:
184 kmem_cache_destroy(_io_cache);
185
186 return r;
188} 187}
189 188
190static void local_exit(void) 189static void local_exit(void)
@@ -669,6 +668,7 @@ static struct bio *split_bvec(struct bio *bio, sector_t sector,
669 clone->bi_size = to_bytes(len); 668 clone->bi_size = to_bytes(len);
670 clone->bi_io_vec->bv_offset = offset; 669 clone->bi_io_vec->bv_offset = offset;
671 clone->bi_io_vec->bv_len = clone->bi_size; 670 clone->bi_io_vec->bv_len = clone->bi_size;
671 clone->bi_flags |= 1 << BIO_CLONED;
672 672
673 return clone; 673 return clone;
674} 674}
@@ -1394,9 +1394,6 @@ static void dm_wq_work(struct work_struct *work)
1394 1394
1395 down_write(&md->io_lock); 1395 down_write(&md->io_lock);
1396 switch (req->type) { 1396 switch (req->type) {
1397 case DM_WQ_FLUSH_ALL:
1398 __merge_pushback_list(md);
1399 /* pass through */
1400 case DM_WQ_FLUSH_DEFERRED: 1397 case DM_WQ_FLUSH_DEFERRED:
1401 __flush_deferred_io(md); 1398 __flush_deferred_io(md);
1402 break; 1399 break;
@@ -1526,7 +1523,7 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
1526 if (!md->suspended_bdev) { 1523 if (!md->suspended_bdev) {
1527 DMWARN("bdget failed in dm_suspend"); 1524 DMWARN("bdget failed in dm_suspend");
1528 r = -ENOMEM; 1525 r = -ENOMEM;
1529 goto flush_and_out; 1526 goto out;
1530 } 1527 }
1531 1528
1532 /* 1529 /*
@@ -1577,14 +1574,6 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
1577 1574
1578 set_bit(DMF_SUSPENDED, &md->flags); 1575 set_bit(DMF_SUSPENDED, &md->flags);
1579 1576
1580flush_and_out:
1581 if (r && noflush)
1582 /*
1583 * Because there may be already I/Os in the pushback list,
1584 * flush them before return.
1585 */
1586 dm_queue_flush(md, DM_WQ_FLUSH_ALL, NULL);
1587
1588out: 1577out:
1589 if (r && md->suspended_bdev) { 1578 if (r && md->suspended_bdev) {
1590 bdput(md->suspended_bdev); 1579 bdput(md->suspended_bdev);
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index cd189da2b2fa..0ade60cdef42 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -62,15 +62,6 @@ void dm_put_target_type(struct target_type *t);
62int dm_target_iterate(void (*iter_func)(struct target_type *tt, 62int dm_target_iterate(void (*iter_func)(struct target_type *tt,
63 void *param), void *param); 63 void *param), void *param);
64 64
65/*-----------------------------------------------------------------
66 * Useful inlines.
67 *---------------------------------------------------------------*/
68static inline int array_too_big(unsigned long fixed, unsigned long obj,
69 unsigned long num)
70{
71 return (num > (ULONG_MAX - fixed) / obj);
72}
73
74int dm_split_args(int *argc, char ***argvp, char *input); 65int dm_split_args(int *argc, char ***argvp, char *input);
75 66
76/* 67/*
diff --git a/drivers/media/common/saa7146_fops.c b/drivers/media/common/saa7146_fops.c
index 5b34c134aa25..127b0526a727 100644
--- a/drivers/media/common/saa7146_fops.c
+++ b/drivers/media/common/saa7146_fops.c
@@ -545,11 +545,11 @@ int saa7146_register_device(struct video_device **vid, struct saa7146_dev* dev,
545 if( VFL_TYPE_GRABBER == type ) { 545 if( VFL_TYPE_GRABBER == type ) {
546 vv->video_minor = vfd->minor; 546 vv->video_minor = vfd->minor;
547 INFO(("%s: registered device video%d [v4l2]\n", 547 INFO(("%s: registered device video%d [v4l2]\n",
548 dev->name, vfd->minor & 0x1f)); 548 dev->name, vfd->num));
549 } else { 549 } else {
550 vv->vbi_minor = vfd->minor; 550 vv->vbi_minor = vfd->minor;
551 INFO(("%s: registered device vbi%d [v4l2]\n", 551 INFO(("%s: registered device vbi%d [v4l2]\n",
552 dev->name, vfd->minor & 0x1f)); 552 dev->name, vfd->num));
553 } 553 }
554 554
555 *vid = vfd; 555 *vid = vfd;
diff --git a/drivers/media/common/saa7146_video.c b/drivers/media/common/saa7146_video.c
index 99be9e5c85f7..fe0bd55977e3 100644
--- a/drivers/media/common/saa7146_video.c
+++ b/drivers/media/common/saa7146_video.c
@@ -834,7 +834,7 @@ static int video_end(struct saa7146_fh *fh, struct file *file)
834 * copying is done already, arg is a kernel pointer. 834 * copying is done already, arg is a kernel pointer.
835 */ 835 */
836 836
837int saa7146_video_do_ioctl(struct inode *inode, struct file *file, unsigned int cmd, void *arg) 837static int __saa7146_video_do_ioctl(struct file *file, unsigned int cmd, void *arg)
838{ 838{
839 struct saa7146_fh *fh = file->private_data; 839 struct saa7146_fh *fh = file->private_data;
840 struct saa7146_dev *dev = fh->dev; 840 struct saa7146_dev *dev = fh->dev;
@@ -1215,12 +1215,18 @@ int saa7146_video_do_ioctl(struct inode *inode, struct file *file, unsigned int
1215 } 1215 }
1216#endif 1216#endif
1217 default: 1217 default:
1218 return v4l_compat_translate_ioctl(inode,file,cmd,arg, 1218 return v4l_compat_translate_ioctl(file, cmd, arg,
1219 saa7146_video_do_ioctl); 1219 __saa7146_video_do_ioctl);
1220 } 1220 }
1221 return 0; 1221 return 0;
1222} 1222}
1223 1223
1224int saa7146_video_do_ioctl(struct inode *inode, struct file *file,
1225 unsigned int cmd, void *arg)
1226{
1227 return __saa7146_video_do_ioctl(file, cmd, arg);
1228}
1229
1224/*********************************************************************************/ 1230/*********************************************************************************/
1225/* buffer handling functions */ 1231/* buffer handling functions */
1226 1232
diff --git a/drivers/media/dvb/frontends/s5h1411.c b/drivers/media/dvb/frontends/s5h1411.c
index 2febfb5a846b..40644aacffcb 100644
--- a/drivers/media/dvb/frontends/s5h1411.c
+++ b/drivers/media/dvb/frontends/s5h1411.c
@@ -38,6 +38,7 @@ struct s5h1411_state {
38 struct dvb_frontend frontend; 38 struct dvb_frontend frontend;
39 39
40 fe_modulation_t current_modulation; 40 fe_modulation_t current_modulation;
41 unsigned int first_tune:1;
41 42
42 u32 current_frequency; 43 u32 current_frequency;
43 int if_freq; 44 int if_freq;
@@ -62,7 +63,7 @@ static struct init_tab {
62 { S5H1411_I2C_TOP_ADDR, 0x08, 0x0047, }, 63 { S5H1411_I2C_TOP_ADDR, 0x08, 0x0047, },
63 { S5H1411_I2C_TOP_ADDR, 0x1c, 0x0400, }, 64 { S5H1411_I2C_TOP_ADDR, 0x1c, 0x0400, },
64 { S5H1411_I2C_TOP_ADDR, 0x1e, 0x0370, }, 65 { S5H1411_I2C_TOP_ADDR, 0x1e, 0x0370, },
65 { S5H1411_I2C_TOP_ADDR, 0x1f, 0x342a, }, 66 { S5H1411_I2C_TOP_ADDR, 0x1f, 0x342c, },
66 { S5H1411_I2C_TOP_ADDR, 0x24, 0x0231, }, 67 { S5H1411_I2C_TOP_ADDR, 0x24, 0x0231, },
67 { S5H1411_I2C_TOP_ADDR, 0x25, 0x1011, }, 68 { S5H1411_I2C_TOP_ADDR, 0x25, 0x1011, },
68 { S5H1411_I2C_TOP_ADDR, 0x26, 0x0f07, }, 69 { S5H1411_I2C_TOP_ADDR, 0x26, 0x0f07, },
@@ -100,7 +101,6 @@ static struct init_tab {
100 { S5H1411_I2C_TOP_ADDR, 0x78, 0x3141, }, 101 { S5H1411_I2C_TOP_ADDR, 0x78, 0x3141, },
101 { S5H1411_I2C_TOP_ADDR, 0x7a, 0x3141, }, 102 { S5H1411_I2C_TOP_ADDR, 0x7a, 0x3141, },
102 { S5H1411_I2C_TOP_ADDR, 0xb3, 0x8003, }, 103 { S5H1411_I2C_TOP_ADDR, 0xb3, 0x8003, },
103 { S5H1411_I2C_TOP_ADDR, 0xb5, 0xafbb, },
104 { S5H1411_I2C_TOP_ADDR, 0xb5, 0xa6bb, }, 104 { S5H1411_I2C_TOP_ADDR, 0xb5, 0xa6bb, },
105 { S5H1411_I2C_TOP_ADDR, 0xb6, 0x0609, }, 105 { S5H1411_I2C_TOP_ADDR, 0xb6, 0x0609, },
106 { S5H1411_I2C_TOP_ADDR, 0xb7, 0x2f06, }, 106 { S5H1411_I2C_TOP_ADDR, 0xb7, 0x2f06, },
@@ -393,7 +393,7 @@ static int s5h1411_set_if_freq(struct dvb_frontend *fe, int KHz)
393 393
394 switch (KHz) { 394 switch (KHz) {
395 case 3250: 395 case 3250:
396 s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x38, 0x10d9); 396 s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x38, 0x10d5);
397 s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x39, 0x5342); 397 s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x39, 0x5342);
398 s5h1411_writereg(state, S5H1411_I2C_QAM_ADDR, 0x2c, 0x10d9); 398 s5h1411_writereg(state, S5H1411_I2C_QAM_ADDR, 0x2c, 0x10d9);
399 break; 399 break;
@@ -464,13 +464,25 @@ static int s5h1411_set_spectralinversion(struct dvb_frontend *fe, int inversion)
464 464
465 if (inversion == 1) 465 if (inversion == 1)
466 val |= 0x1000; /* Inverted */ 466 val |= 0x1000; /* Inverted */
467 else
468 val |= 0x0000;
469 467
470 state->inversion = inversion; 468 state->inversion = inversion;
471 return s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x24, val); 469 return s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x24, val);
472} 470}
473 471
472static int s5h1411_set_serialmode(struct dvb_frontend *fe, int serial)
473{
474 struct s5h1411_state *state = fe->demodulator_priv;
475 u16 val;
476
477 dprintk("%s(%d)\n", __func__, serial);
478 val = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0xbd) & ~0x100;
479
480 if (serial == 1)
481 val |= 0x100;
482
483 return s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xbd, val);
484}
485
474static int s5h1411_enable_modulation(struct dvb_frontend *fe, 486static int s5h1411_enable_modulation(struct dvb_frontend *fe,
475 fe_modulation_t m) 487 fe_modulation_t m)
476{ 488{
@@ -478,6 +490,12 @@ static int s5h1411_enable_modulation(struct dvb_frontend *fe,
478 490
479 dprintk("%s(0x%08x)\n", __func__, m); 491 dprintk("%s(0x%08x)\n", __func__, m);
480 492
493 if ((state->first_tune == 0) && (m == state->current_modulation)) {
494 dprintk("%s() Already at desired modulation. Skipping...\n",
495 __func__);
496 return 0;
497 }
498
481 switch (m) { 499 switch (m) {
482 case VSB_8: 500 case VSB_8:
483 dprintk("%s() VSB_8\n", __func__); 501 dprintk("%s() VSB_8\n", __func__);
@@ -502,6 +520,7 @@ static int s5h1411_enable_modulation(struct dvb_frontend *fe,
502 } 520 }
503 521
504 state->current_modulation = m; 522 state->current_modulation = m;
523 state->first_tune = 0;
505 s5h1411_softreset(fe); 524 s5h1411_softreset(fe);
506 525
507 return 0; 526 return 0;
@@ -535,7 +554,7 @@ static int s5h1411_set_gpio(struct dvb_frontend *fe, int enable)
535 return s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xe0, val); 554 return s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xe0, val);
536} 555}
537 556
538static int s5h1411_sleep(struct dvb_frontend *fe, int enable) 557static int s5h1411_set_powerstate(struct dvb_frontend *fe, int enable)
539{ 558{
540 struct s5h1411_state *state = fe->demodulator_priv; 559 struct s5h1411_state *state = fe->demodulator_priv;
541 560
@@ -551,6 +570,11 @@ static int s5h1411_sleep(struct dvb_frontend *fe, int enable)
551 return 0; 570 return 0;
552} 571}
553 572
573static int s5h1411_sleep(struct dvb_frontend *fe)
574{
575 return s5h1411_set_powerstate(fe, 1);
576}
577
554static int s5h1411_register_reset(struct dvb_frontend *fe) 578static int s5h1411_register_reset(struct dvb_frontend *fe)
555{ 579{
556 struct s5h1411_state *state = fe->demodulator_priv; 580 struct s5h1411_state *state = fe->demodulator_priv;
@@ -574,9 +598,6 @@ static int s5h1411_set_frontend(struct dvb_frontend *fe,
574 598
575 s5h1411_enable_modulation(fe, p->u.vsb.modulation); 599 s5h1411_enable_modulation(fe, p->u.vsb.modulation);
576 600
577 /* Allow the demod to settle */
578 msleep(100);
579
580 if (fe->ops.tuner_ops.set_params) { 601 if (fe->ops.tuner_ops.set_params) {
581 if (fe->ops.i2c_gate_ctrl) 602 if (fe->ops.i2c_gate_ctrl)
582 fe->ops.i2c_gate_ctrl(fe, 1); 603 fe->ops.i2c_gate_ctrl(fe, 1);
@@ -587,6 +608,10 @@ static int s5h1411_set_frontend(struct dvb_frontend *fe,
587 fe->ops.i2c_gate_ctrl(fe, 0); 608 fe->ops.i2c_gate_ctrl(fe, 0);
588 } 609 }
589 610
611 /* Issue a reset to the demod so it knows to resync against the
612 newly tuned frequency */
613 s5h1411_softreset(fe);
614
590 return 0; 615 return 0;
591} 616}
592 617
@@ -599,7 +624,7 @@ static int s5h1411_init(struct dvb_frontend *fe)
599 624
600 dprintk("%s()\n", __func__); 625 dprintk("%s()\n", __func__);
601 626
602 s5h1411_sleep(fe, 0); 627 s5h1411_set_powerstate(fe, 0);
603 s5h1411_register_reset(fe); 628 s5h1411_register_reset(fe);
604 629
605 for (i = 0; i < ARRAY_SIZE(init_tab); i++) 630 for (i = 0; i < ARRAY_SIZE(init_tab); i++)
@@ -610,12 +635,17 @@ static int s5h1411_init(struct dvb_frontend *fe)
610 /* The datasheet says that after initialisation, VSB is default */ 635 /* The datasheet says that after initialisation, VSB is default */
611 state->current_modulation = VSB_8; 636 state->current_modulation = VSB_8;
612 637
638 /* Although the datasheet says it's in VSB, empirical evidence
639 shows problems getting lock on the first tuning request. Make
640 sure we call enable_modulation the first time around */
641 state->first_tune = 1;
642
613 if (state->config->output_mode == S5H1411_SERIAL_OUTPUT) 643 if (state->config->output_mode == S5H1411_SERIAL_OUTPUT)
614 /* Serial */ 644 /* Serial */
615 s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xbd, 0x1101); 645 s5h1411_set_serialmode(fe, 1);
616 else 646 else
617 /* Parallel */ 647 /* Parallel */
618 s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0xbd, 0x1001); 648 s5h1411_set_serialmode(fe, 0);
619 649
620 s5h1411_set_spectralinversion(fe, state->config->inversion); 650 s5h1411_set_spectralinversion(fe, state->config->inversion);
621 s5h1411_set_if_freq(fe, state->config->vsb_if); 651 s5h1411_set_if_freq(fe, state->config->vsb_if);
@@ -637,28 +667,29 @@ static int s5h1411_read_status(struct dvb_frontend *fe, fe_status_t *status)
637 667
638 *status = 0; 668 *status = 0;
639 669
640 /* Get the demodulator status */ 670 /* Register F2 bit 15 = Master Lock, removed */
641 reg = (s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0xf2) >> 15)
642 & 0x0001;
643 if (reg)
644 *status |= FE_HAS_LOCK | FE_HAS_CARRIER | FE_HAS_SIGNAL;
645 671
646 switch (state->current_modulation) { 672 switch (state->current_modulation) {
647 case QAM_64: 673 case QAM_64:
648 case QAM_256: 674 case QAM_256:
649 reg = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0xf0); 675 reg = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0xf0);
650 if (reg & 0x100) 676 if (reg & 0x10) /* QAM FEC Lock */
651 *status |= FE_HAS_VITERBI; 677 *status |= FE_HAS_SYNC | FE_HAS_LOCK;
652 if (reg & 0x10) 678 if (reg & 0x100) /* QAM EQ Lock */
653 *status |= FE_HAS_SYNC; 679 *status |= FE_HAS_VITERBI | FE_HAS_CARRIER | FE_HAS_SIGNAL;
680
654 break; 681 break;
655 case VSB_8: 682 case VSB_8:
656 reg = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0x5e);
657 if (reg & 0x0001)
658 *status |= FE_HAS_SYNC;
659 reg = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0xf2); 683 reg = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0xf2);
660 if (reg & 0x1000) 684 if (reg & 0x1000) /* FEC Lock */
661 *status |= FE_HAS_VITERBI; 685 *status |= FE_HAS_SYNC | FE_HAS_LOCK;
686 if (reg & 0x2000) /* EQ Lock */
687 *status |= FE_HAS_VITERBI | FE_HAS_CARRIER | FE_HAS_SIGNAL;
688
689 reg = s5h1411_readreg(state, S5H1411_I2C_TOP_ADDR, 0x53);
690 if (reg & 0x1) /* AFC Lock */
691 *status |= FE_HAS_SIGNAL;
692
662 break; 693 break;
663 default: 694 default:
664 return -EINVAL; 695 return -EINVAL;
@@ -863,6 +894,7 @@ static struct dvb_frontend_ops s5h1411_ops = {
863 }, 894 },
864 895
865 .init = s5h1411_init, 896 .init = s5h1411_init,
897 .sleep = s5h1411_sleep,
866 .i2c_gate_ctrl = s5h1411_i2c_gate_ctrl, 898 .i2c_gate_ctrl = s5h1411_i2c_gate_ctrl,
867 .set_frontend = s5h1411_set_frontend, 899 .set_frontend = s5h1411_set_frontend,
868 .get_frontend = s5h1411_get_frontend, 900 .get_frontend = s5h1411_get_frontend,
diff --git a/drivers/media/dvb/frontends/s5h1411.h b/drivers/media/dvb/frontends/s5h1411.h
index 7d542bc00c48..45ec0f82989c 100644
--- a/drivers/media/dvb/frontends/s5h1411.h
+++ b/drivers/media/dvb/frontends/s5h1411.h
@@ -47,7 +47,7 @@ struct s5h1411_config {
47 u16 mpeg_timing; 47 u16 mpeg_timing;
48 48
49 /* IF Freq for QAM and VSB in KHz */ 49 /* IF Freq for QAM and VSB in KHz */
50#define S5H1411_IF_2500 2500 50#define S5H1411_IF_3250 3250
51#define S5H1411_IF_3500 3500 51#define S5H1411_IF_3500 3500
52#define S5H1411_IF_4000 4000 52#define S5H1411_IF_4000 4000
53#define S5H1411_IF_5380 5380 53#define S5H1411_IF_5380 5380
diff --git a/drivers/media/radio/dsbr100.c b/drivers/media/radio/dsbr100.c
index 78f56944e640..a5ca176a7b08 100644
--- a/drivers/media/radio/dsbr100.c
+++ b/drivers/media/radio/dsbr100.c
@@ -171,11 +171,11 @@ static int dsbr100_start(struct dsbr100_device *radio)
171 if (usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0), 171 if (usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0),
172 USB_REQ_GET_STATUS, 172 USB_REQ_GET_STATUS,
173 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, 173 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
174 0x00, 0xC7, radio->transfer_buffer, 8, 300)<0 || 174 0x00, 0xC7, radio->transfer_buffer, 8, 300) < 0 ||
175 usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0), 175 usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0),
176 DSB100_ONOFF, 176 DSB100_ONOFF,
177 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, 177 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
178 0x01, 0x00, radio->transfer_buffer, 8, 300)<0) 178 0x01, 0x00, radio->transfer_buffer, 8, 300) < 0)
179 return -1; 179 return -1;
180 radio->muted=0; 180 radio->muted=0;
181 return (radio->transfer_buffer)[0]; 181 return (radio->transfer_buffer)[0];
@@ -188,11 +188,11 @@ static int dsbr100_stop(struct dsbr100_device *radio)
188 if (usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0), 188 if (usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0),
189 USB_REQ_GET_STATUS, 189 USB_REQ_GET_STATUS,
190 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, 190 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
191 0x16, 0x1C, radio->transfer_buffer, 8, 300)<0 || 191 0x16, 0x1C, radio->transfer_buffer, 8, 300) < 0 ||
192 usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0), 192 usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0),
193 DSB100_ONOFF, 193 DSB100_ONOFF,
194 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, 194 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
195 0x00, 0x00, radio->transfer_buffer, 8, 300)<0) 195 0x00, 0x00, radio->transfer_buffer, 8, 300) < 0)
196 return -1; 196 return -1;
197 radio->muted=1; 197 radio->muted=1;
198 return (radio->transfer_buffer)[0]; 198 return (radio->transfer_buffer)[0];
@@ -201,24 +201,24 @@ static int dsbr100_stop(struct dsbr100_device *radio)
201/* set a frequency, freq is defined by v4l's TUNER_LOW, i.e. 1/16th kHz */ 201/* set a frequency, freq is defined by v4l's TUNER_LOW, i.e. 1/16th kHz */
202static int dsbr100_setfreq(struct dsbr100_device *radio, int freq) 202static int dsbr100_setfreq(struct dsbr100_device *radio, int freq)
203{ 203{
204 freq = (freq/16*80)/1000+856; 204 freq = (freq / 16 * 80) / 1000 + 856;
205 if (usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0), 205 if (usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0),
206 DSB100_TUNE, 206 DSB100_TUNE,
207 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, 207 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
208 (freq>>8)&0x00ff, freq&0xff, 208 (freq >> 8) & 0x00ff, freq & 0xff,
209 radio->transfer_buffer, 8, 300)<0 || 209 radio->transfer_buffer, 8, 300) < 0 ||
210 usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0), 210 usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0),
211 USB_REQ_GET_STATUS, 211 USB_REQ_GET_STATUS,
212 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, 212 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
213 0x96, 0xB7, radio->transfer_buffer, 8, 300)<0 || 213 0x96, 0xB7, radio->transfer_buffer, 8, 300) < 0 ||
214 usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0), 214 usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0),
215 USB_REQ_GET_STATUS, 215 USB_REQ_GET_STATUS,
216 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, 216 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
217 0x00, 0x24, radio->transfer_buffer, 8, 300)<0) { 217 0x00, 0x24, radio->transfer_buffer, 8, 300) < 0) {
218 radio->stereo = -1; 218 radio->stereo = -1;
219 return -1; 219 return -1;
220 } 220 }
221 radio->stereo = ! ((radio->transfer_buffer)[0]&0x01); 221 radio->stereo = !((radio->transfer_buffer)[0] & 0x01);
222 return (radio->transfer_buffer)[0]; 222 return (radio->transfer_buffer)[0];
223} 223}
224 224
@@ -229,10 +229,10 @@ static void dsbr100_getstat(struct dsbr100_device *radio)
229 if (usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0), 229 if (usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0),
230 USB_REQ_GET_STATUS, 230 USB_REQ_GET_STATUS,
231 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, 231 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
232 0x00 , 0x24, radio->transfer_buffer, 8, 300)<0) 232 0x00 , 0x24, radio->transfer_buffer, 8, 300) < 0)
233 radio->stereo = -1; 233 radio->stereo = -1;
234 else 234 else
235 radio->stereo = ! (radio->transfer_buffer[0]&0x01); 235 radio->stereo = !(radio->transfer_buffer[0] & 0x01);
236} 236}
237 237
238 238
@@ -265,7 +265,7 @@ static int vidioc_querycap(struct file *file, void *priv,
265{ 265{
266 strlcpy(v->driver, "dsbr100", sizeof(v->driver)); 266 strlcpy(v->driver, "dsbr100", sizeof(v->driver));
267 strlcpy(v->card, "D-Link R-100 USB FM Radio", sizeof(v->card)); 267 strlcpy(v->card, "D-Link R-100 USB FM Radio", sizeof(v->card));
268 sprintf(v->bus_info, "ISA"); 268 sprintf(v->bus_info, "USB");
269 v->version = RADIO_VERSION; 269 v->version = RADIO_VERSION;
270 v->capabilities = V4L2_CAP_TUNER; 270 v->capabilities = V4L2_CAP_TUNER;
271 return 0; 271 return 0;
@@ -282,9 +282,9 @@ static int vidioc_g_tuner(struct file *file, void *priv,
282 dsbr100_getstat(radio); 282 dsbr100_getstat(radio);
283 strcpy(v->name, "FM"); 283 strcpy(v->name, "FM");
284 v->type = V4L2_TUNER_RADIO; 284 v->type = V4L2_TUNER_RADIO;
285 v->rangelow = FREQ_MIN*FREQ_MUL; 285 v->rangelow = FREQ_MIN * FREQ_MUL;
286 v->rangehigh = FREQ_MAX*FREQ_MUL; 286 v->rangehigh = FREQ_MAX * FREQ_MUL;
287 v->rxsubchans = V4L2_TUNER_SUB_MONO|V4L2_TUNER_SUB_STEREO; 287 v->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO;
288 v->capability = V4L2_TUNER_CAP_LOW; 288 v->capability = V4L2_TUNER_CAP_LOW;
289 if(radio->stereo) 289 if(radio->stereo)
290 v->audmode = V4L2_TUNER_MODE_STEREO; 290 v->audmode = V4L2_TUNER_MODE_STEREO;
@@ -309,7 +309,7 @@ static int vidioc_s_frequency(struct file *file, void *priv,
309 struct dsbr100_device *radio = video_drvdata(file); 309 struct dsbr100_device *radio = video_drvdata(file);
310 310
311 radio->curfreq = f->frequency; 311 radio->curfreq = f->frequency;
312 if (dsbr100_setfreq(radio, radio->curfreq)==-1) 312 if (dsbr100_setfreq(radio, radio->curfreq) == -1)
313 dev_warn(&radio->usbdev->dev, "Set frequency failed\n"); 313 dev_warn(&radio->usbdev->dev, "Set frequency failed\n");
314 return 0; 314 return 0;
315} 315}
@@ -331,8 +331,7 @@ static int vidioc_queryctrl(struct file *file, void *priv,
331 331
332 for (i = 0; i < ARRAY_SIZE(radio_qctrl); i++) { 332 for (i = 0; i < ARRAY_SIZE(radio_qctrl); i++) {
333 if (qc->id && qc->id == radio_qctrl[i].id) { 333 if (qc->id && qc->id == radio_qctrl[i].id) {
334 memcpy(qc, &(radio_qctrl[i]), 334 memcpy(qc, &(radio_qctrl[i]), sizeof(*qc));
335 sizeof(*qc));
336 return 0; 335 return 0;
337 } 336 }
338 } 337 }
@@ -412,19 +411,25 @@ static int vidioc_s_audio(struct file *file, void *priv,
412static int usb_dsbr100_open(struct inode *inode, struct file *file) 411static int usb_dsbr100_open(struct inode *inode, struct file *file)
413{ 412{
414 struct dsbr100_device *radio = video_drvdata(file); 413 struct dsbr100_device *radio = video_drvdata(file);
414 int retval;
415 415
416 lock_kernel(); 416 lock_kernel();
417 radio->users = 1; 417 radio->users = 1;
418 radio->muted = 1; 418 radio->muted = 1;
419 419
420 if (dsbr100_start(radio)<0) { 420 if (dsbr100_start(radio) < 0) {
421 dev_warn(&radio->usbdev->dev, 421 dev_warn(&radio->usbdev->dev,
422 "Radio did not start up properly\n"); 422 "Radio did not start up properly\n");
423 radio->users = 0; 423 radio->users = 0;
424 unlock_kernel(); 424 unlock_kernel();
425 return -EIO; 425 return -EIO;
426 } 426 }
427 dsbr100_setfreq(radio, radio->curfreq); 427
428 retval = dsbr100_setfreq(radio, radio->curfreq);
429
430 if (retval == -1)
431 printk(KERN_WARNING KBUILD_MODNAME ": Set frequency failed\n");
432
428 unlock_kernel(); 433 unlock_kernel();
429 return 0; 434 return 0;
430} 435}
@@ -485,13 +490,20 @@ static int usb_dsbr100_probe(struct usb_interface *intf,
485{ 490{
486 struct dsbr100_device *radio; 491 struct dsbr100_device *radio;
487 492
488 if (!(radio = kmalloc(sizeof(struct dsbr100_device), GFP_KERNEL))) 493 radio = kmalloc(sizeof(struct dsbr100_device), GFP_KERNEL);
494
495 if (!radio)
489 return -ENOMEM; 496 return -ENOMEM;
490 if (!(radio->transfer_buffer = kmalloc(TB_LEN, GFP_KERNEL))) { 497
498 radio->transfer_buffer = kmalloc(TB_LEN, GFP_KERNEL);
499
500 if (!(radio->transfer_buffer)) {
491 kfree(radio); 501 kfree(radio);
492 return -ENOMEM; 502 return -ENOMEM;
493 } 503 }
494 if (!(radio->videodev = video_device_alloc())) { 504 radio->videodev = video_device_alloc();
505
506 if (!(radio->videodev)) {
495 kfree(radio->transfer_buffer); 507 kfree(radio->transfer_buffer);
496 kfree(radio); 508 kfree(radio);
497 return -ENOMEM; 509 return -ENOMEM;
@@ -501,7 +513,7 @@ static int usb_dsbr100_probe(struct usb_interface *intf,
501 radio->removed = 0; 513 radio->removed = 0;
502 radio->users = 0; 514 radio->users = 0;
503 radio->usbdev = interface_to_usbdev(intf); 515 radio->usbdev = interface_to_usbdev(intf);
504 radio->curfreq = FREQ_MIN*FREQ_MUL; 516 radio->curfreq = FREQ_MIN * FREQ_MUL;
505 video_set_drvdata(radio->videodev, radio); 517 video_set_drvdata(radio->videodev, radio);
506 if (video_register_device(radio->videodev, VFL_TYPE_RADIO, radio_nr) < 0) { 518 if (video_register_device(radio->videodev, VFL_TYPE_RADIO, radio_nr) < 0) {
507 dev_warn(&intf->dev, "Could not register video device\n"); 519 dev_warn(&intf->dev, "Could not register video device\n");
diff --git a/drivers/media/radio/radio-mr800.c b/drivers/media/radio/radio-mr800.c
index a33717c48003..256cbeffdcb6 100644
--- a/drivers/media/radio/radio-mr800.c
+++ b/drivers/media/radio/radio-mr800.c
@@ -469,16 +469,21 @@ static int usb_amradio_open(struct inode *inode, struct file *file)
469{ 469{
470 struct amradio_device *radio = video_get_drvdata(video_devdata(file)); 470 struct amradio_device *radio = video_get_drvdata(video_devdata(file));
471 471
472 lock_kernel();
473
472 radio->users = 1; 474 radio->users = 1;
473 radio->muted = 1; 475 radio->muted = 1;
474 476
475 if (amradio_start(radio) < 0) { 477 if (amradio_start(radio) < 0) {
476 warn("Radio did not start up properly"); 478 warn("Radio did not start up properly");
477 radio->users = 0; 479 radio->users = 0;
480 unlock_kernel();
478 return -EIO; 481 return -EIO;
479 } 482 }
480 if (amradio_setfreq(radio, radio->curfreq) < 0) 483 if (amradio_setfreq(radio, radio->curfreq) < 0)
481 warn("Set frequency failed"); 484 warn("Set frequency failed");
485
486 unlock_kernel();
482 return 0; 487 return 0;
483} 488}
484 489
diff --git a/drivers/media/video/arv.c b/drivers/media/video/arv.c
index 218754b4906a..e09b00693230 100644
--- a/drivers/media/video/arv.c
+++ b/drivers/media/video/arv.c
@@ -866,7 +866,7 @@ static int __init ar_init(void)
866 } 866 }
867 867
868 printk("video%d: Found M64278 VGA (IRQ %d, Freq %dMHz).\n", 868 printk("video%d: Found M64278 VGA (IRQ %d, Freq %dMHz).\n",
869 ar->vdev->minor, M32R_IRQ_INT3, freq); 869 ar->vdev->num, M32R_IRQ_INT3, freq);
870 870
871 return 0; 871 return 0;
872 872
diff --git a/drivers/media/video/bt8xx/bttv-driver.c b/drivers/media/video/bt8xx/bttv-driver.c
index 5858bf5ff41c..9ec4cec2e52d 100644
--- a/drivers/media/video/bt8xx/bttv-driver.c
+++ b/drivers/media/video/bt8xx/bttv-driver.c
@@ -4246,7 +4246,7 @@ static int __devinit bttv_register_video(struct bttv *btv)
4246 video_nr[btv->c.nr]) < 0) 4246 video_nr[btv->c.nr]) < 0)
4247 goto err; 4247 goto err;
4248 printk(KERN_INFO "bttv%d: registered device video%d\n", 4248 printk(KERN_INFO "bttv%d: registered device video%d\n",
4249 btv->c.nr,btv->video_dev->minor & 0x1f); 4249 btv->c.nr, btv->video_dev->num);
4250 if (device_create_file(&btv->video_dev->dev, 4250 if (device_create_file(&btv->video_dev->dev,
4251 &dev_attr_card)<0) { 4251 &dev_attr_card)<0) {
4252 printk(KERN_ERR "bttv%d: device_create_file 'card' " 4252 printk(KERN_ERR "bttv%d: device_create_file 'card' "
@@ -4263,7 +4263,7 @@ static int __devinit bttv_register_video(struct bttv *btv)
4263 vbi_nr[btv->c.nr]) < 0) 4263 vbi_nr[btv->c.nr]) < 0)
4264 goto err; 4264 goto err;
4265 printk(KERN_INFO "bttv%d: registered device vbi%d\n", 4265 printk(KERN_INFO "bttv%d: registered device vbi%d\n",
4266 btv->c.nr,btv->vbi_dev->minor & 0x1f); 4266 btv->c.nr, btv->vbi_dev->num);
4267 4267
4268 if (!btv->has_radio) 4268 if (!btv->has_radio)
4269 return 0; 4269 return 0;
@@ -4275,7 +4275,7 @@ static int __devinit bttv_register_video(struct bttv *btv)
4275 radio_nr[btv->c.nr]) < 0) 4275 radio_nr[btv->c.nr]) < 0)
4276 goto err; 4276 goto err;
4277 printk(KERN_INFO "bttv%d: registered device radio%d\n", 4277 printk(KERN_INFO "bttv%d: registered device radio%d\n",
4278 btv->c.nr,btv->radio_dev->minor & 0x1f); 4278 btv->c.nr, btv->radio_dev->num);
4279 4279
4280 /* all done */ 4280 /* all done */
4281 return 0; 4281 return 0;
diff --git a/drivers/media/video/c-qcam.c b/drivers/media/video/c-qcam.c
index 17aa0adb3467..0f930d351466 100644
--- a/drivers/media/video/c-qcam.c
+++ b/drivers/media/video/c-qcam.c
@@ -815,7 +815,7 @@ static int init_cqcam(struct parport *port)
815 } 815 }
816 816
817 printk(KERN_INFO "video%d: Colour QuickCam found on %s\n", 817 printk(KERN_INFO "video%d: Colour QuickCam found on %s\n",
818 qcam->vdev.minor, qcam->pport->name); 818 qcam->vdev.num, qcam->pport->name);
819 819
820 qcams[num_cams++] = qcam; 820 qcams[num_cams++] = qcam;
821 821
diff --git a/drivers/media/video/cafe_ccic.c b/drivers/media/video/cafe_ccic.c
index fc9497bdd322..a8c068e1de1c 100644
--- a/drivers/media/video/cafe_ccic.c
+++ b/drivers/media/video/cafe_ccic.c
@@ -2059,10 +2059,10 @@ static void cafe_dfs_cam_setup(struct cafe_camera *cam)
2059 2059
2060 if (!cafe_dfs_root) 2060 if (!cafe_dfs_root)
2061 return; 2061 return;
2062 sprintf(fname, "regs-%d", cam->v4ldev.minor); 2062 sprintf(fname, "regs-%d", cam->v4ldev.num);
2063 cam->dfs_regs = debugfs_create_file(fname, 0444, cafe_dfs_root, 2063 cam->dfs_regs = debugfs_create_file(fname, 0444, cafe_dfs_root,
2064 cam, &cafe_dfs_reg_ops); 2064 cam, &cafe_dfs_reg_ops);
2065 sprintf(fname, "cam-%d", cam->v4ldev.minor); 2065 sprintf(fname, "cam-%d", cam->v4ldev.num);
2066 cam->dfs_cam_regs = debugfs_create_file(fname, 0444, cafe_dfs_root, 2066 cam->dfs_cam_regs = debugfs_create_file(fname, 0444, cafe_dfs_root,
2067 cam, &cafe_dfs_cam_ops); 2067 cam, &cafe_dfs_cam_ops);
2068} 2068}
diff --git a/drivers/media/video/cpia.c b/drivers/media/video/cpia.c
index 1798b779a25a..16c094f77852 100644
--- a/drivers/media/video/cpia.c
+++ b/drivers/media/video/cpia.c
@@ -1347,7 +1347,7 @@ static void create_proc_cpia_cam(struct cam_data *cam)
1347 if (!cpia_proc_root || !cam) 1347 if (!cpia_proc_root || !cam)
1348 return; 1348 return;
1349 1349
1350 snprintf(name, sizeof(name), "video%d", cam->vdev.minor); 1350 snprintf(name, sizeof(name), "video%d", cam->vdev.num);
1351 1351
1352 ent = create_proc_entry(name, S_IFREG|S_IRUGO|S_IWUSR, cpia_proc_root); 1352 ent = create_proc_entry(name, S_IFREG|S_IRUGO|S_IWUSR, cpia_proc_root);
1353 if (!ent) 1353 if (!ent)
@@ -1372,7 +1372,7 @@ static void destroy_proc_cpia_cam(struct cam_data *cam)
1372 if (!cam || !cam->proc_entry) 1372 if (!cam || !cam->proc_entry)
1373 return; 1373 return;
1374 1374
1375 snprintf(name, sizeof(name), "video%d", cam->vdev.minor); 1375 snprintf(name, sizeof(name), "video%d", cam->vdev.num);
1376 remove_proc_entry(name, cpia_proc_root); 1376 remove_proc_entry(name, cpia_proc_root);
1377 cam->proc_entry = NULL; 1377 cam->proc_entry = NULL;
1378} 1378}
@@ -4005,7 +4005,7 @@ void cpia_unregister_camera(struct cam_data *cam)
4005 } 4005 }
4006 4006
4007#ifdef CONFIG_PROC_FS 4007#ifdef CONFIG_PROC_FS
4008 DBG("destroying /proc/cpia/video%d\n", cam->vdev.minor); 4008 DBG("destroying /proc/cpia/video%d\n", cam->vdev.num);
4009 destroy_proc_cpia_cam(cam); 4009 destroy_proc_cpia_cam(cam);
4010#endif 4010#endif
4011 if (!cam->open_count) { 4011 if (!cam->open_count) {
diff --git a/drivers/media/video/cpia2/cpia2_v4l.c b/drivers/media/video/cpia2/cpia2_v4l.c
index 897e8d1a5c3c..1c6bd633f193 100644
--- a/drivers/media/video/cpia2/cpia2_v4l.c
+++ b/drivers/media/video/cpia2/cpia2_v4l.c
@@ -1973,7 +1973,7 @@ void cpia2_unregister_camera(struct camera_data *cam)
1973 } else { 1973 } else {
1974 LOG("/dev/video%d removed while open, " 1974 LOG("/dev/video%d removed while open, "
1975 "deferring video_unregister_device\n", 1975 "deferring video_unregister_device\n",
1976 cam->vdev->minor); 1976 cam->vdev->num);
1977 } 1977 }
1978} 1978}
1979 1979
diff --git a/drivers/media/video/cx18/cx18-driver.c b/drivers/media/video/cx18/cx18-driver.c
index 085121c2b47f..7a1a7830a6b3 100644
--- a/drivers/media/video/cx18/cx18-driver.c
+++ b/drivers/media/video/cx18/cx18-driver.c
@@ -613,6 +613,7 @@ static int __devinit cx18_probe(struct pci_dev *dev,
613 const struct pci_device_id *pci_id) 613 const struct pci_device_id *pci_id)
614{ 614{
615 int retval = 0; 615 int retval = 0;
616 int i;
616 int vbi_buf_size; 617 int vbi_buf_size;
617 u32 devtype; 618 u32 devtype;
618 struct cx18 *cx; 619 struct cx18 *cx;
@@ -698,7 +699,8 @@ static int __devinit cx18_probe(struct pci_dev *dev,
698 699
699 /* active i2c */ 700 /* active i2c */
700 CX18_DEBUG_INFO("activating i2c...\n"); 701 CX18_DEBUG_INFO("activating i2c...\n");
701 if (init_cx18_i2c(cx)) { 702 retval = init_cx18_i2c(cx);
703 if (retval) {
702 CX18_ERR("Could not initialize i2c\n"); 704 CX18_ERR("Could not initialize i2c\n");
703 goto free_map; 705 goto free_map;
704 } 706 }
@@ -836,8 +838,11 @@ err:
836 CX18_ERR("Error %d on initialization\n", retval); 838 CX18_ERR("Error %d on initialization\n", retval);
837 cx18_log_statistics(cx); 839 cx18_log_statistics(cx);
838 840
839 kfree(cx18_cards[cx18_cards_active]); 841 i = cx->num;
840 cx18_cards[cx18_cards_active] = NULL; 842 spin_lock(&cx18_cards_lock);
843 kfree(cx18_cards[i]);
844 cx18_cards[i] = NULL;
845 spin_unlock(&cx18_cards_lock);
841 return retval; 846 return retval;
842} 847}
843 848
diff --git a/drivers/media/video/cx18/cx18-io.h b/drivers/media/video/cx18/cx18-io.h
index 197d4fbd9f95..287a5e8bf67b 100644
--- a/drivers/media/video/cx18/cx18-io.h
+++ b/drivers/media/video/cx18/cx18-io.h
@@ -39,7 +39,7 @@ static inline void cx18_io_delay(struct cx18 *cx)
39 39
40/* Statistics gathering */ 40/* Statistics gathering */
41static inline 41static inline
42void cx18_log_write_retries(struct cx18 *cx, int i, const void *addr) 42void cx18_log_write_retries(struct cx18 *cx, int i, const void __iomem *addr)
43{ 43{
44 if (i > CX18_MAX_MMIO_RETRIES) 44 if (i > CX18_MAX_MMIO_RETRIES)
45 i = CX18_MAX_MMIO_RETRIES; 45 i = CX18_MAX_MMIO_RETRIES;
@@ -48,7 +48,7 @@ void cx18_log_write_retries(struct cx18 *cx, int i, const void *addr)
48} 48}
49 49
50static inline 50static inline
51void cx18_log_read_retries(struct cx18 *cx, int i, const void *addr) 51void cx18_log_read_retries(struct cx18 *cx, int i, const void __iomem *addr)
52{ 52{
53 if (i > CX18_MAX_MMIO_RETRIES) 53 if (i > CX18_MAX_MMIO_RETRIES)
54 i = CX18_MAX_MMIO_RETRIES; 54 i = CX18_MAX_MMIO_RETRIES;
diff --git a/drivers/media/video/cx18/cx18-streams.c b/drivers/media/video/cx18/cx18-streams.c
index 0c8e7542cf60..e5ff7705b7a1 100644
--- a/drivers/media/video/cx18/cx18-streams.c
+++ b/drivers/media/video/cx18/cx18-streams.c
@@ -200,16 +200,18 @@ static int cx18_prep_dev(struct cx18 *cx, int type)
200/* Initialize v4l2 variables and register v4l2 devices */ 200/* Initialize v4l2 variables and register v4l2 devices */
201int cx18_streams_setup(struct cx18 *cx) 201int cx18_streams_setup(struct cx18 *cx)
202{ 202{
203 int type; 203 int type, ret;
204 204
205 /* Setup V4L2 Devices */ 205 /* Setup V4L2 Devices */
206 for (type = 0; type < CX18_MAX_STREAMS; type++) { 206 for (type = 0; type < CX18_MAX_STREAMS; type++) {
207 /* Prepare device */ 207 /* Prepare device */
208 if (cx18_prep_dev(cx, type)) 208 ret = cx18_prep_dev(cx, type);
209 if (ret < 0)
209 break; 210 break;
210 211
211 /* Allocate Stream */ 212 /* Allocate Stream */
212 if (cx18_stream_alloc(&cx->streams[type])) 213 ret = cx18_stream_alloc(&cx->streams[type]);
214 if (ret < 0)
213 break; 215 break;
214 } 216 }
215 if (type == CX18_MAX_STREAMS) 217 if (type == CX18_MAX_STREAMS)
@@ -217,14 +219,14 @@ int cx18_streams_setup(struct cx18 *cx)
217 219
218 /* One or more streams could not be initialized. Clean 'em all up. */ 220 /* One or more streams could not be initialized. Clean 'em all up. */
219 cx18_streams_cleanup(cx, 0); 221 cx18_streams_cleanup(cx, 0);
220 return -ENOMEM; 222 return ret;
221} 223}
222 224
223static int cx18_reg_dev(struct cx18 *cx, int type) 225static int cx18_reg_dev(struct cx18 *cx, int type)
224{ 226{
225 struct cx18_stream *s = &cx->streams[type]; 227 struct cx18_stream *s = &cx->streams[type];
226 int vfl_type = cx18_stream_info[type].vfl_type; 228 int vfl_type = cx18_stream_info[type].vfl_type;
227 int num; 229 int num, ret;
228 230
229 /* TODO: Shouldn't this be a VFL_TYPE_TRANSPORT or something? 231 /* TODO: Shouldn't this be a VFL_TYPE_TRANSPORT or something?
230 * We need a VFL_TYPE_TS defined. 232 * We need a VFL_TYPE_TS defined.
@@ -233,9 +235,10 @@ static int cx18_reg_dev(struct cx18 *cx, int type)
233 /* just return if no DVB is supported */ 235 /* just return if no DVB is supported */
234 if ((cx->card->hw_all & CX18_HW_DVB) == 0) 236 if ((cx->card->hw_all & CX18_HW_DVB) == 0)
235 return 0; 237 return 0;
236 if (cx18_dvb_register(s) < 0) { 238 ret = cx18_dvb_register(s);
239 if (ret < 0) {
237 CX18_ERR("DVB failed to register\n"); 240 CX18_ERR("DVB failed to register\n");
238 return -EINVAL; 241 return ret;
239 } 242 }
240 } 243 }
241 244
@@ -252,12 +255,13 @@ static int cx18_reg_dev(struct cx18 *cx, int type)
252 } 255 }
253 256
254 /* Register device. First try the desired minor, then any free one. */ 257 /* Register device. First try the desired minor, then any free one. */
255 if (video_register_device(s->v4l2dev, vfl_type, num)) { 258 ret = video_register_device(s->v4l2dev, vfl_type, num);
259 if (ret < 0) {
256 CX18_ERR("Couldn't register v4l2 device for %s kernel number %d\n", 260 CX18_ERR("Couldn't register v4l2 device for %s kernel number %d\n",
257 s->name, num); 261 s->name, num);
258 video_device_release(s->v4l2dev); 262 video_device_release(s->v4l2dev);
259 s->v4l2dev = NULL; 263 s->v4l2dev = NULL;
260 return -ENOMEM; 264 return ret;
261 } 265 }
262 num = s->v4l2dev->num; 266 num = s->v4l2dev->num;
263 267
@@ -290,18 +294,22 @@ static int cx18_reg_dev(struct cx18 *cx, int type)
290int cx18_streams_register(struct cx18 *cx) 294int cx18_streams_register(struct cx18 *cx)
291{ 295{
292 int type; 296 int type;
293 int err = 0; 297 int err;
298 int ret = 0;
294 299
295 /* Register V4L2 devices */ 300 /* Register V4L2 devices */
296 for (type = 0; type < CX18_MAX_STREAMS; type++) 301 for (type = 0; type < CX18_MAX_STREAMS; type++) {
297 err |= cx18_reg_dev(cx, type); 302 err = cx18_reg_dev(cx, type);
303 if (err && ret == 0)
304 ret = err;
305 }
298 306
299 if (err == 0) 307 if (ret == 0)
300 return 0; 308 return 0;
301 309
302 /* One or more streams could not be initialized. Clean 'em all up. */ 310 /* One or more streams could not be initialized. Clean 'em all up. */
303 cx18_streams_cleanup(cx, 1); 311 cx18_streams_cleanup(cx, 1);
304 return -ENOMEM; 312 return ret;
305} 313}
306 314
307/* Unregister v4l2 devices */ 315/* Unregister v4l2 devices */
diff --git a/drivers/media/video/cx23885/cx23885-417.c b/drivers/media/video/cx23885/cx23885-417.c
index 395c11fa47ce..00831f3ef8f5 100644
--- a/drivers/media/video/cx23885/cx23885-417.c
+++ b/drivers/media/video/cx23885/cx23885-417.c
@@ -1815,7 +1815,7 @@ int cx23885_417_register(struct cx23885_dev *dev)
1815 cx23885_mc417_init(dev); 1815 cx23885_mc417_init(dev);
1816 1816
1817 printk(KERN_INFO "%s: registered device video%d [mpeg]\n", 1817 printk(KERN_INFO "%s: registered device video%d [mpeg]\n",
1818 dev->name, dev->v4l_device->minor & 0x1f); 1818 dev->name, dev->v4l_device->num);
1819 1819
1820 return 0; 1820 return 0;
1821} 1821}
diff --git a/drivers/media/video/cx23885/cx23885-video.c b/drivers/media/video/cx23885/cx23885-video.c
index ab3110d6046c..c742a10be5cb 100644
--- a/drivers/media/video/cx23885/cx23885-video.c
+++ b/drivers/media/video/cx23885/cx23885-video.c
@@ -1543,7 +1543,7 @@ int cx23885_video_register(struct cx23885_dev *dev)
1543 goto fail_unreg; 1543 goto fail_unreg;
1544 } 1544 }
1545 printk(KERN_INFO "%s/0: registered device video%d [v4l2]\n", 1545 printk(KERN_INFO "%s/0: registered device video%d [v4l2]\n",
1546 dev->name, dev->video_dev->minor & 0x1f); 1546 dev->name, dev->video_dev->num);
1547 /* initial device configuration */ 1547 /* initial device configuration */
1548 mutex_lock(&dev->lock); 1548 mutex_lock(&dev->lock);
1549 cx23885_set_tvnorm(dev, dev->tvnorm); 1549 cx23885_set_tvnorm(dev, dev->tvnorm);
diff --git a/drivers/media/video/cx88/cx88-blackbird.c b/drivers/media/video/cx88/cx88-blackbird.c
index e71369754305..078be6319556 100644
--- a/drivers/media/video/cx88/cx88-blackbird.c
+++ b/drivers/media/video/cx88/cx88-blackbird.c
@@ -1285,7 +1285,7 @@ static int blackbird_register_video(struct cx8802_dev *dev)
1285 return err; 1285 return err;
1286 } 1286 }
1287 printk(KERN_INFO "%s/2: registered device video%d [mpeg]\n", 1287 printk(KERN_INFO "%s/2: registered device video%d [mpeg]\n",
1288 dev->core->name,dev->mpeg_dev->minor & 0x1f); 1288 dev->core->name, dev->mpeg_dev->num);
1289 return 0; 1289 return 0;
1290} 1290}
1291 1291
diff --git a/drivers/media/video/cx88/cx88-cards.c b/drivers/media/video/cx88/cx88-cards.c
index fbc224f46e0e..5bcbb4cc7c2a 100644
--- a/drivers/media/video/cx88/cx88-cards.c
+++ b/drivers/media/video/cx88/cx88-cards.c
@@ -3044,8 +3044,8 @@ struct cx88_core *cx88_core_create(struct pci_dev *pci, int nr)
3044 3044
3045 memcpy(&core->board, &cx88_boards[core->boardnr], sizeof(core->board)); 3045 memcpy(&core->board, &cx88_boards[core->boardnr], sizeof(core->board));
3046 3046
3047 if (!core->board.num_frontends) 3047 if (!core->board.num_frontends && (core->board.mpeg & CX88_MPEG_DVB))
3048 core->board.num_frontends=1; 3048 core->board.num_frontends = 1;
3049 3049
3050 info_printk(core, "subsystem: %04x:%04x, board: %s [card=%d,%s], frontend(s): %d\n", 3050 info_printk(core, "subsystem: %04x:%04x, board: %s [card=%d,%s], frontend(s): %d\n",
3051 pci->subsystem_vendor, pci->subsystem_device, core->board.name, 3051 pci->subsystem_vendor, pci->subsystem_device, core->board.name,
diff --git a/drivers/media/video/cx88/cx88-dvb.c b/drivers/media/video/cx88/cx88-dvb.c
index 6968ab0181aa..cf6c30d4e545 100644
--- a/drivers/media/video/cx88/cx88-dvb.c
+++ b/drivers/media/video/cx88/cx88-dvb.c
@@ -789,7 +789,7 @@ static int dvb_register(struct cx8802_dev *dev)
789 if (fe0->dvb.frontend) 789 if (fe0->dvb.frontend)
790 fe0->dvb.frontend->ops.i2c_gate_ctrl = NULL; 790 fe0->dvb.frontend->ops.i2c_gate_ctrl = NULL;
791 if (attach_xc3028(0x61, dev) < 0) 791 if (attach_xc3028(0x61, dev) < 0)
792 return -EINVAL; 792 goto frontend_detach;
793 break; 793 break;
794 case CX88_BOARD_PCHDTV_HD3000: 794 case CX88_BOARD_PCHDTV_HD3000:
795 fe0->dvb.frontend = dvb_attach(or51132_attach, &pchdtv_hd3000, 795 fe0->dvb.frontend = dvb_attach(or51132_attach, &pchdtv_hd3000,
@@ -1058,7 +1058,6 @@ static int dvb_register(struct cx8802_dev *dev)
1058 goto frontend_detach; 1058 goto frontend_detach;
1059 core->prev_set_voltage = fe0->dvb.frontend->ops.set_voltage; 1059 core->prev_set_voltage = fe0->dvb.frontend->ops.set_voltage;
1060 fe0->dvb.frontend->ops.set_voltage = tevii_dvbs_set_voltage; 1060 fe0->dvb.frontend->ops.set_voltage = tevii_dvbs_set_voltage;
1061
1062 } 1061 }
1063 } 1062 }
1064 break; 1063 break;
@@ -1110,10 +1109,7 @@ static int dvb_register(struct cx8802_dev *dev)
1110 &dev->pci->dev, adapter_nr, mfe_shared); 1109 &dev->pci->dev, adapter_nr, mfe_shared);
1111 1110
1112frontend_detach: 1111frontend_detach:
1113 if (fe0->dvb.frontend) { 1112 videobuf_dvb_dealloc_frontends(&dev->frontends);
1114 dvb_frontend_detach(fe0->dvb.frontend);
1115 fe0->dvb.frontend = NULL;
1116 }
1117 return -EINVAL; 1113 return -EINVAL;
1118} 1114}
1119 1115
@@ -1246,8 +1242,11 @@ fail_core:
1246 1242
1247static int cx8802_dvb_remove(struct cx8802_driver *drv) 1243static int cx8802_dvb_remove(struct cx8802_driver *drv)
1248{ 1244{
1245 struct cx88_core *core = drv->core;
1249 struct cx8802_dev *dev = drv->core->dvbdev; 1246 struct cx8802_dev *dev = drv->core->dvbdev;
1250 1247
1248 dprintk( 1, "%s\n", __func__);
1249
1251 videobuf_dvb_unregister_bus(&dev->frontends); 1250 videobuf_dvb_unregister_bus(&dev->frontends);
1252 1251
1253 vp3054_i2c_remove(dev); 1252 vp3054_i2c_remove(dev);
diff --git a/drivers/media/video/cx88/cx88-i2c.c b/drivers/media/video/cx88/cx88-i2c.c
index 01de23007095..1ab691d20692 100644
--- a/drivers/media/video/cx88/cx88-i2c.c
+++ b/drivers/media/video/cx88/cx88-i2c.c
@@ -116,8 +116,10 @@ static int detach_inform(struct i2c_client *client)
116 116
117void cx88_call_i2c_clients(struct cx88_core *core, unsigned int cmd, void *arg) 117void cx88_call_i2c_clients(struct cx88_core *core, unsigned int cmd, void *arg)
118{ 118{
119#if defined(CONFIG_VIDEO_CX88_DVB) || defined(CONFIG_VIDEO_CX88_DVB_MODULE)
119 struct videobuf_dvb_frontends *f = &core->dvbdev->frontends; 120 struct videobuf_dvb_frontends *f = &core->dvbdev->frontends;
120 struct videobuf_dvb_frontend *fe = NULL; 121 struct videobuf_dvb_frontend *fe = NULL;
122#endif
121 if (0 != core->i2c_rc) 123 if (0 != core->i2c_rc)
122 return; 124 return;
123 125
diff --git a/drivers/media/video/cx88/cx88-mpeg.c b/drivers/media/video/cx88/cx88-mpeg.c
index 6df5cf314186..a1c435b4b1cd 100644
--- a/drivers/media/video/cx88/cx88-mpeg.c
+++ b/drivers/media/video/cx88/cx88-mpeg.c
@@ -768,8 +768,11 @@ static int __devinit cx8802_probe(struct pci_dev *pci_dev,
768{ 768{
769 struct cx8802_dev *dev; 769 struct cx8802_dev *dev;
770 struct cx88_core *core; 770 struct cx88_core *core;
771 int err;
772#if defined(CONFIG_VIDEO_CX88_DVB) || defined(CONFIG_VIDEO_CX88_DVB_MODULE)
771 struct videobuf_dvb_frontend *demod; 773 struct videobuf_dvb_frontend *demod;
772 int err,i; 774 int i;
775#endif
773 776
774 /* general setup */ 777 /* general setup */
775 core = cx88_core_get(pci_dev); 778 core = cx88_core_get(pci_dev);
@@ -782,11 +785,6 @@ static int __devinit cx8802_probe(struct pci_dev *pci_dev,
782 if (!core->board.mpeg) 785 if (!core->board.mpeg)
783 goto fail_core; 786 goto fail_core;
784 787
785 if (!core->board.num_frontends) {
786 printk(KERN_ERR "%s() .num_frontends should be non-zero, err = %d\n", __func__, err);
787 goto fail_core;
788 }
789
790 err = -ENOMEM; 788 err = -ENOMEM;
791 dev = kzalloc(sizeof(*dev),GFP_KERNEL); 789 dev = kzalloc(sizeof(*dev),GFP_KERNEL);
792 if (NULL == dev) 790 if (NULL == dev)
@@ -801,10 +799,12 @@ static int __devinit cx8802_probe(struct pci_dev *pci_dev,
801 INIT_LIST_HEAD(&dev->drvlist); 799 INIT_LIST_HEAD(&dev->drvlist);
802 list_add_tail(&dev->devlist,&cx8802_devlist); 800 list_add_tail(&dev->devlist,&cx8802_devlist);
803 801
802#if defined(CONFIG_VIDEO_CX88_DVB) || defined(CONFIG_VIDEO_CX88_DVB_MODULE)
804 mutex_init(&dev->frontends.lock); 803 mutex_init(&dev->frontends.lock);
805 INIT_LIST_HEAD(&dev->frontends.felist); 804 INIT_LIST_HEAD(&dev->frontends.felist);
806 805
807 printk(KERN_INFO "%s() allocating %d frontend(s)\n", __func__, core->board.num_frontends); 806 if (core->board.num_frontends)
807 printk(KERN_INFO "%s() allocating %d frontend(s)\n", __func__, core->board.num_frontends);
808 808
809 for (i = 1; i <= core->board.num_frontends; i++) { 809 for (i = 1; i <= core->board.num_frontends; i++) {
810 demod = videobuf_dvb_alloc_frontend(&dev->frontends, i); 810 demod = videobuf_dvb_alloc_frontend(&dev->frontends, i);
@@ -814,6 +814,7 @@ static int __devinit cx8802_probe(struct pci_dev *pci_dev,
814 goto fail_free; 814 goto fail_free;
815 } 815 }
816 } 816 }
817#endif
817 818
818 /* Maintain a reference so cx88-video can query the 8802 device. */ 819 /* Maintain a reference so cx88-video can query the 8802 device. */
819 core->dvbdev = dev; 820 core->dvbdev = dev;
diff --git a/drivers/media/video/cx88/cx88-video.c b/drivers/media/video/cx88/cx88-video.c
index 3904b73f52ee..61265fd04d56 100644
--- a/drivers/media/video/cx88/cx88-video.c
+++ b/drivers/media/video/cx88/cx88-video.c
@@ -1911,7 +1911,7 @@ static int __devinit cx8800_initdev(struct pci_dev *pci_dev,
1911 goto fail_unreg; 1911 goto fail_unreg;
1912 } 1912 }
1913 printk(KERN_INFO "%s/0: registered device video%d [v4l2]\n", 1913 printk(KERN_INFO "%s/0: registered device video%d [v4l2]\n",
1914 core->name,dev->video_dev->minor & 0x1f); 1914 core->name, dev->video_dev->num);
1915 1915
1916 dev->vbi_dev = cx88_vdev_init(core,dev->pci,&cx8800_vbi_template,"vbi"); 1916 dev->vbi_dev = cx88_vdev_init(core,dev->pci,&cx8800_vbi_template,"vbi");
1917 err = video_register_device(dev->vbi_dev,VFL_TYPE_VBI, 1917 err = video_register_device(dev->vbi_dev,VFL_TYPE_VBI,
@@ -1922,7 +1922,7 @@ static int __devinit cx8800_initdev(struct pci_dev *pci_dev,
1922 goto fail_unreg; 1922 goto fail_unreg;
1923 } 1923 }
1924 printk(KERN_INFO "%s/0: registered device vbi%d\n", 1924 printk(KERN_INFO "%s/0: registered device vbi%d\n",
1925 core->name,dev->vbi_dev->minor & 0x1f); 1925 core->name, dev->vbi_dev->num);
1926 1926
1927 if (core->board.radio.type == CX88_RADIO) { 1927 if (core->board.radio.type == CX88_RADIO) {
1928 dev->radio_dev = cx88_vdev_init(core,dev->pci, 1928 dev->radio_dev = cx88_vdev_init(core,dev->pci,
@@ -1935,7 +1935,7 @@ static int __devinit cx8800_initdev(struct pci_dev *pci_dev,
1935 goto fail_unreg; 1935 goto fail_unreg;
1936 } 1936 }
1937 printk(KERN_INFO "%s/0: registered device radio%d\n", 1937 printk(KERN_INFO "%s/0: registered device radio%d\n",
1938 core->name,dev->radio_dev->minor & 0x1f); 1938 core->name, dev->radio_dev->num);
1939 } 1939 }
1940 1940
1941 /* everything worked */ 1941 /* everything worked */
diff --git a/drivers/media/video/em28xx/em28xx-video.c b/drivers/media/video/em28xx/em28xx-video.c
index c53649e5315b..a1ab2ef45578 100644
--- a/drivers/media/video/em28xx/em28xx-video.c
+++ b/drivers/media/video/em28xx/em28xx-video.c
@@ -2042,7 +2042,7 @@ static int em28xx_init_dev(struct em28xx **devhandle, struct usb_device *udev,
2042 goto fail_unreg; 2042 goto fail_unreg;
2043 } 2043 }
2044 em28xx_info("Registered radio device as /dev/radio%d\n", 2044 em28xx_info("Registered radio device as /dev/radio%d\n",
2045 dev->radio_dev->minor & 0x1f); 2045 dev->radio_dev->num);
2046 } 2046 }
2047 2047
2048 /* init video dma queues */ 2048 /* init video dma queues */
diff --git a/drivers/media/video/et61x251/et61x251_core.c b/drivers/media/video/et61x251/et61x251_core.c
index 7a85c41b0eea..9d0ef96c23ff 100644
--- a/drivers/media/video/et61x251/et61x251_core.c
+++ b/drivers/media/video/et61x251/et61x251_core.c
@@ -588,7 +588,7 @@ static int et61x251_stream_interrupt(struct et61x251_device* cam)
588 cam->state |= DEV_MISCONFIGURED; 588 cam->state |= DEV_MISCONFIGURED;
589 DBG(1, "URB timeout reached. The camera is misconfigured. To " 589 DBG(1, "URB timeout reached. The camera is misconfigured. To "
590 "use it, close and open /dev/video%d again.", 590 "use it, close and open /dev/video%d again.",
591 cam->v4ldev->minor); 591 cam->v4ldev->num);
592 return -EIO; 592 return -EIO;
593 } 593 }
594 594
@@ -1195,7 +1195,7 @@ static void et61x251_release_resources(struct kref *kref)
1195 1195
1196 cam = container_of(kref, struct et61x251_device, kref); 1196 cam = container_of(kref, struct et61x251_device, kref);
1197 1197
1198 DBG(2, "V4L2 device /dev/video%d deregistered", cam->v4ldev->minor); 1198 DBG(2, "V4L2 device /dev/video%d deregistered", cam->v4ldev->num);
1199 video_set_drvdata(cam->v4ldev, NULL); 1199 video_set_drvdata(cam->v4ldev, NULL);
1200 video_unregister_device(cam->v4ldev); 1200 video_unregister_device(cam->v4ldev);
1201 usb_put_dev(cam->usbdev); 1201 usb_put_dev(cam->usbdev);
@@ -1237,7 +1237,7 @@ static int et61x251_open(struct inode* inode, struct file* filp)
1237 1237
1238 if (cam->users) { 1238 if (cam->users) {
1239 DBG(2, "Device /dev/video%d is already in use", 1239 DBG(2, "Device /dev/video%d is already in use",
1240 cam->v4ldev->minor); 1240 cam->v4ldev->num);
1241 DBG(3, "Simultaneous opens are not supported"); 1241 DBG(3, "Simultaneous opens are not supported");
1242 if ((filp->f_flags & O_NONBLOCK) || 1242 if ((filp->f_flags & O_NONBLOCK) ||
1243 (filp->f_flags & O_NDELAY)) { 1243 (filp->f_flags & O_NDELAY)) {
@@ -1280,7 +1280,7 @@ static int et61x251_open(struct inode* inode, struct file* filp)
1280 cam->frame_count = 0; 1280 cam->frame_count = 0;
1281 et61x251_empty_framequeues(cam); 1281 et61x251_empty_framequeues(cam);
1282 1282
1283 DBG(3, "Video device /dev/video%d is open", cam->v4ldev->minor); 1283 DBG(3, "Video device /dev/video%d is open", cam->v4ldev->num);
1284 1284
1285out: 1285out:
1286 mutex_unlock(&cam->open_mutex); 1286 mutex_unlock(&cam->open_mutex);
@@ -1304,7 +1304,7 @@ static int et61x251_release(struct inode* inode, struct file* filp)
1304 cam->users--; 1304 cam->users--;
1305 wake_up_interruptible_nr(&cam->wait_open, 1); 1305 wake_up_interruptible_nr(&cam->wait_open, 1);
1306 1306
1307 DBG(3, "Video device /dev/video%d closed", cam->v4ldev->minor); 1307 DBG(3, "Video device /dev/video%d closed", cam->v4ldev->num);
1308 1308
1309 kref_put(&cam->kref, et61x251_release_resources); 1309 kref_put(&cam->kref, et61x251_release_resources);
1310 1310
@@ -1845,7 +1845,7 @@ et61x251_vidioc_s_crop(struct et61x251_device* cam, void __user * arg)
1845 cam->state |= DEV_MISCONFIGURED; 1845 cam->state |= DEV_MISCONFIGURED;
1846 DBG(1, "VIDIOC_S_CROP failed because of hardware problems. To " 1846 DBG(1, "VIDIOC_S_CROP failed because of hardware problems. To "
1847 "use the camera, close and open /dev/video%d again.", 1847 "use the camera, close and open /dev/video%d again.",
1848 cam->v4ldev->minor); 1848 cam->v4ldev->num);
1849 return -EIO; 1849 return -EIO;
1850 } 1850 }
1851 1851
@@ -1858,7 +1858,7 @@ et61x251_vidioc_s_crop(struct et61x251_device* cam, void __user * arg)
1858 cam->state |= DEV_MISCONFIGURED; 1858 cam->state |= DEV_MISCONFIGURED;
1859 DBG(1, "VIDIOC_S_CROP failed because of not enough memory. To " 1859 DBG(1, "VIDIOC_S_CROP failed because of not enough memory. To "
1860 "use the camera, close and open /dev/video%d again.", 1860 "use the camera, close and open /dev/video%d again.",
1861 cam->v4ldev->minor); 1861 cam->v4ldev->num);
1862 return -ENOMEM; 1862 return -ENOMEM;
1863 } 1863 }
1864 1864
@@ -2068,7 +2068,7 @@ et61x251_vidioc_try_s_fmt(struct et61x251_device* cam, unsigned int cmd,
2068 cam->state |= DEV_MISCONFIGURED; 2068 cam->state |= DEV_MISCONFIGURED;
2069 DBG(1, "VIDIOC_S_FMT failed because of hardware problems. To " 2069 DBG(1, "VIDIOC_S_FMT failed because of hardware problems. To "
2070 "use the camera, close and open /dev/video%d again.", 2070 "use the camera, close and open /dev/video%d again.",
2071 cam->v4ldev->minor); 2071 cam->v4ldev->num);
2072 return -EIO; 2072 return -EIO;
2073 } 2073 }
2074 2074
@@ -2080,7 +2080,7 @@ et61x251_vidioc_try_s_fmt(struct et61x251_device* cam, unsigned int cmd,
2080 cam->state |= DEV_MISCONFIGURED; 2080 cam->state |= DEV_MISCONFIGURED;
2081 DBG(1, "VIDIOC_S_FMT failed because of not enough memory. To " 2081 DBG(1, "VIDIOC_S_FMT failed because of not enough memory. To "
2082 "use the camera, close and open /dev/video%d again.", 2082 "use the camera, close and open /dev/video%d again.",
2083 cam->v4ldev->minor); 2083 cam->v4ldev->num);
2084 return -ENOMEM; 2084 return -ENOMEM;
2085 } 2085 }
2086 2086
@@ -2128,7 +2128,7 @@ et61x251_vidioc_s_jpegcomp(struct et61x251_device* cam, void __user * arg)
2128 cam->state |= DEV_MISCONFIGURED; 2128 cam->state |= DEV_MISCONFIGURED;
2129 DBG(1, "VIDIOC_S_JPEGCOMP failed because of hardware " 2129 DBG(1, "VIDIOC_S_JPEGCOMP failed because of hardware "
2130 "problems. To use the camera, close and open " 2130 "problems. To use the camera, close and open "
2131 "/dev/video%d again.", cam->v4ldev->minor); 2131 "/dev/video%d again.", cam->v4ldev->num);
2132 return -EIO; 2132 return -EIO;
2133 } 2133 }
2134 2134
@@ -2605,7 +2605,7 @@ et61x251_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
2605 goto fail; 2605 goto fail;
2606 } 2606 }
2607 2607
2608 DBG(2, "V4L2 device registered as /dev/video%d", cam->v4ldev->minor); 2608 DBG(2, "V4L2 device registered as /dev/video%d", cam->v4ldev->num);
2609 2609
2610 cam->module_param.force_munmap = force_munmap[dev_nr]; 2610 cam->module_param.force_munmap = force_munmap[dev_nr];
2611 cam->module_param.frame_timeout = frame_timeout[dev_nr]; 2611 cam->module_param.frame_timeout = frame_timeout[dev_nr];
@@ -2658,7 +2658,7 @@ static void et61x251_usb_disconnect(struct usb_interface* intf)
2658 if (cam->users) { 2658 if (cam->users) {
2659 DBG(2, "Device /dev/video%d is open! Deregistration and " 2659 DBG(2, "Device /dev/video%d is open! Deregistration and "
2660 "memory deallocation are deferred.", 2660 "memory deallocation are deferred.",
2661 cam->v4ldev->minor); 2661 cam->v4ldev->num);
2662 cam->state |= DEV_MISCONFIGURED; 2662 cam->state |= DEV_MISCONFIGURED;
2663 et61x251_stop_transfer(cam); 2663 et61x251_stop_transfer(cam);
2664 cam->state |= DEV_DISCONNECTED; 2664 cam->state |= DEV_DISCONNECTED;
diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c
index aeaa13f6cb36..d36485023b68 100644
--- a/drivers/media/video/ivtv/ivtv-driver.c
+++ b/drivers/media/video/ivtv/ivtv-driver.c
@@ -1211,6 +1211,10 @@ static int __devinit ivtv_probe(struct pci_dev *dev,
1211 1211
1212 if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT) { 1212 if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT) {
1213 ivtv_call_i2c_clients(itv, VIDIOC_INT_S_STD_OUTPUT, &itv->std); 1213 ivtv_call_i2c_clients(itv, VIDIOC_INT_S_STD_OUTPUT, &itv->std);
1214 /* Turn off the output signal. The mpeg decoder is not yet
1215 active so without this you would get a green image until the
1216 mpeg decoder becomes active. */
1217 ivtv_saa7127(itv, VIDIOC_STREAMOFF, NULL);
1214 } 1218 }
1215 1219
1216 /* clear interrupt mask, effectively disabling interrupts */ 1220 /* clear interrupt mask, effectively disabling interrupts */
@@ -1330,6 +1334,10 @@ int ivtv_init_on_first_open(struct ivtv *itv)
1330 ivtv_s_frequency(NULL, &fh, &vf); 1334 ivtv_s_frequency(NULL, &fh, &vf);
1331 1335
1332 if (itv->card->v4l2_capabilities & V4L2_CAP_VIDEO_OUTPUT) { 1336 if (itv->card->v4l2_capabilities & V4L2_CAP_VIDEO_OUTPUT) {
1337 /* Turn on the TV-out: ivtv_init_mpeg_decoder() initializes
1338 the mpeg decoder so now the saa7127 receives a proper
1339 signal. */
1340 ivtv_saa7127(itv, VIDIOC_STREAMON, NULL);
1333 ivtv_init_mpeg_decoder(itv); 1341 ivtv_init_mpeg_decoder(itv);
1334 } 1342 }
1335 ivtv_s_std(NULL, &fh, &itv->tuner_std); 1343 ivtv_s_std(NULL, &fh, &itv->tuner_std);
@@ -1366,6 +1374,10 @@ static void ivtv_remove(struct pci_dev *pci_dev)
1366 1374
1367 /* Stop all decoding */ 1375 /* Stop all decoding */
1368 IVTV_DEBUG_INFO("Stopping decoding\n"); 1376 IVTV_DEBUG_INFO("Stopping decoding\n");
1377
1378 /* Turn off the TV-out */
1379 if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)
1380 ivtv_saa7127(itv, VIDIOC_STREAMOFF, NULL);
1369 if (atomic_read(&itv->decoding) > 0) { 1381 if (atomic_read(&itv->decoding) > 0) {
1370 int type; 1382 int type;
1371 1383
diff --git a/drivers/media/video/ivtv/ivtv-i2c.c b/drivers/media/video/ivtv/ivtv-i2c.c
index 24700c211d52..41dbbe9621a1 100644
--- a/drivers/media/video/ivtv/ivtv-i2c.c
+++ b/drivers/media/video/ivtv/ivtv-i2c.c
@@ -726,6 +726,7 @@ int ivtv_saa7127(struct ivtv *itv, unsigned int cmd, void *arg)
726{ 726{
727 return ivtv_call_i2c_client(itv, IVTV_SAA7127_I2C_ADDR, cmd, arg); 727 return ivtv_call_i2c_client(itv, IVTV_SAA7127_I2C_ADDR, cmd, arg);
728} 728}
729EXPORT_SYMBOL(ivtv_saa7127);
729 730
730int ivtv_saa717x(struct ivtv *itv, unsigned int cmd, void *arg) 731int ivtv_saa717x(struct ivtv *itv, unsigned int cmd, void *arg)
731{ 732{
diff --git a/drivers/media/video/ivtv/ivtv-ioctl.c b/drivers/media/video/ivtv/ivtv-ioctl.c
index 208fb54842f2..4bae38d21ef6 100644
--- a/drivers/media/video/ivtv/ivtv-ioctl.c
+++ b/drivers/media/video/ivtv/ivtv-ioctl.c
@@ -1756,12 +1756,12 @@ static int ivtv_default(struct file *file, void *fh, int cmd, void *arg)
1756 return 0; 1756 return 0;
1757} 1757}
1758 1758
1759static int ivtv_serialized_ioctl(struct ivtv *itv, struct inode *inode, struct file *filp, 1759static long ivtv_serialized_ioctl(struct ivtv *itv, struct file *filp,
1760 unsigned int cmd, unsigned long arg) 1760 unsigned int cmd, unsigned long arg)
1761{ 1761{
1762 struct video_device *vfd = video_devdata(filp); 1762 struct video_device *vfd = video_devdata(filp);
1763 struct ivtv_open_id *id = (struct ivtv_open_id *)filp->private_data; 1763 struct ivtv_open_id *id = (struct ivtv_open_id *)filp->private_data;
1764 int ret; 1764 long ret;
1765 1765
1766 /* Filter dvb ioctls that cannot be handled by the v4l ioctl framework */ 1766 /* Filter dvb ioctls that cannot be handled by the v4l ioctl framework */
1767 switch (cmd) { 1767 switch (cmd) {
@@ -1830,20 +1830,19 @@ static int ivtv_serialized_ioctl(struct ivtv *itv, struct inode *inode, struct f
1830 1830
1831 if (ivtv_debug & IVTV_DBGFLG_IOCTL) 1831 if (ivtv_debug & IVTV_DBGFLG_IOCTL)
1832 vfd->debug = V4L2_DEBUG_IOCTL | V4L2_DEBUG_IOCTL_ARG; 1832 vfd->debug = V4L2_DEBUG_IOCTL | V4L2_DEBUG_IOCTL_ARG;
1833 ret = video_ioctl2(inode, filp, cmd, arg); 1833 ret = __video_ioctl2(filp, cmd, arg);
1834 vfd->debug = 0; 1834 vfd->debug = 0;
1835 return ret; 1835 return ret;
1836} 1836}
1837 1837
1838int ivtv_v4l2_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, 1838long ivtv_v4l2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1839 unsigned long arg)
1840{ 1839{
1841 struct ivtv_open_id *id = (struct ivtv_open_id *)filp->private_data; 1840 struct ivtv_open_id *id = (struct ivtv_open_id *)filp->private_data;
1842 struct ivtv *itv = id->itv; 1841 struct ivtv *itv = id->itv;
1843 int res; 1842 long res;
1844 1843
1845 mutex_lock(&itv->serialize_lock); 1844 mutex_lock(&itv->serialize_lock);
1846 res = ivtv_serialized_ioctl(itv, inode, filp, cmd, arg); 1845 res = ivtv_serialized_ioctl(itv, filp, cmd, arg);
1847 mutex_unlock(&itv->serialize_lock); 1846 mutex_unlock(&itv->serialize_lock);
1848 return res; 1847 return res;
1849} 1848}
diff --git a/drivers/media/video/ivtv/ivtv-ioctl.h b/drivers/media/video/ivtv/ivtv-ioctl.h
index 70188588b4f4..58f003412afd 100644
--- a/drivers/media/video/ivtv/ivtv-ioctl.h
+++ b/drivers/media/video/ivtv/ivtv-ioctl.h
@@ -30,7 +30,6 @@ void ivtv_set_funcs(struct video_device *vdev);
30int ivtv_s_std(struct file *file, void *fh, v4l2_std_id *std); 30int ivtv_s_std(struct file *file, void *fh, v4l2_std_id *std);
31int ivtv_s_frequency(struct file *file, void *fh, struct v4l2_frequency *vf); 31int ivtv_s_frequency(struct file *file, void *fh, struct v4l2_frequency *vf);
32int ivtv_s_input(struct file *file, void *fh, unsigned int inp); 32int ivtv_s_input(struct file *file, void *fh, unsigned int inp);
33int ivtv_v4l2_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, 33long ivtv_v4l2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
34 unsigned long arg);
35 34
36#endif 35#endif
diff --git a/drivers/media/video/ivtv/ivtv-streams.c b/drivers/media/video/ivtv/ivtv-streams.c
index 5bbf31e39304..9b7aa79eb267 100644
--- a/drivers/media/video/ivtv/ivtv-streams.c
+++ b/drivers/media/video/ivtv/ivtv-streams.c
@@ -48,7 +48,7 @@ static const struct file_operations ivtv_v4l2_enc_fops = {
48 .read = ivtv_v4l2_read, 48 .read = ivtv_v4l2_read,
49 .write = ivtv_v4l2_write, 49 .write = ivtv_v4l2_write,
50 .open = ivtv_v4l2_open, 50 .open = ivtv_v4l2_open,
51 .ioctl = ivtv_v4l2_ioctl, 51 .unlocked_ioctl = ivtv_v4l2_ioctl,
52 .compat_ioctl = v4l_compat_ioctl32, 52 .compat_ioctl = v4l_compat_ioctl32,
53 .release = ivtv_v4l2_close, 53 .release = ivtv_v4l2_close,
54 .poll = ivtv_v4l2_enc_poll, 54 .poll = ivtv_v4l2_enc_poll,
@@ -59,7 +59,7 @@ static const struct file_operations ivtv_v4l2_dec_fops = {
59 .read = ivtv_v4l2_read, 59 .read = ivtv_v4l2_read,
60 .write = ivtv_v4l2_write, 60 .write = ivtv_v4l2_write,
61 .open = ivtv_v4l2_open, 61 .open = ivtv_v4l2_open,
62 .ioctl = ivtv_v4l2_ioctl, 62 .unlocked_ioctl = ivtv_v4l2_ioctl,
63 .compat_ioctl = v4l_compat_ioctl32, 63 .compat_ioctl = v4l_compat_ioctl32,
64 .release = ivtv_v4l2_close, 64 .release = ivtv_v4l2_close,
65 .poll = ivtv_v4l2_dec_poll, 65 .poll = ivtv_v4l2_dec_poll,
diff --git a/drivers/media/video/ivtv/ivtvfb.c b/drivers/media/video/ivtv/ivtvfb.c
index 8a4a150b12fb..921e281876f8 100644
--- a/drivers/media/video/ivtv/ivtvfb.c
+++ b/drivers/media/video/ivtv/ivtvfb.c
@@ -48,6 +48,7 @@
48#endif 48#endif
49 49
50#include "ivtv-driver.h" 50#include "ivtv-driver.h"
51#include "ivtv-i2c.h"
51#include "ivtv-udma.h" 52#include "ivtv-udma.h"
52#include "ivtv-mailbox.h" 53#include "ivtv-mailbox.h"
53 54
@@ -894,11 +895,16 @@ static int ivtvfb_blank(int blank_mode, struct fb_info *info)
894 switch (blank_mode) { 895 switch (blank_mode) {
895 case FB_BLANK_UNBLANK: 896 case FB_BLANK_UNBLANK:
896 ivtv_vapi(itv, CX2341X_OSD_SET_STATE, 1, 1); 897 ivtv_vapi(itv, CX2341X_OSD_SET_STATE, 1, 1);
898 ivtv_saa7127(itv, VIDIOC_STREAMON, NULL);
897 break; 899 break;
898 case FB_BLANK_NORMAL: 900 case FB_BLANK_NORMAL:
899 case FB_BLANK_HSYNC_SUSPEND: 901 case FB_BLANK_HSYNC_SUSPEND:
900 case FB_BLANK_VSYNC_SUSPEND: 902 case FB_BLANK_VSYNC_SUSPEND:
903 ivtv_vapi(itv, CX2341X_OSD_SET_STATE, 1, 0);
904 ivtv_saa7127(itv, VIDIOC_STREAMON, NULL);
905 break;
901 case FB_BLANK_POWERDOWN: 906 case FB_BLANK_POWERDOWN:
907 ivtv_saa7127(itv, VIDIOC_STREAMOFF, NULL);
902 ivtv_vapi(itv, CX2341X_OSD_SET_STATE, 1, 0); 908 ivtv_vapi(itv, CX2341X_OSD_SET_STATE, 1, 0);
903 break; 909 break;
904 } 910 }
diff --git a/drivers/media/video/pvrusb2/pvrusb2-encoder.c b/drivers/media/video/pvrusb2/pvrusb2-encoder.c
index a1252d673b41..273d2a1aa220 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-encoder.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-encoder.c
@@ -402,6 +402,10 @@ static int pvr2_encoder_prep_config(struct pvr2_hdw *hdw)
402 ret |= pvr2_encoder_vcmd(hdw, CX2341X_ENC_MISC,4, 0,3,0,0); 402 ret |= pvr2_encoder_vcmd(hdw, CX2341X_ENC_MISC,4, 0,3,0,0);
403 ret |= pvr2_encoder_vcmd(hdw, CX2341X_ENC_MISC,4,15,0,0,0); 403 ret |= pvr2_encoder_vcmd(hdw, CX2341X_ENC_MISC,4,15,0,0,0);
404 404
405 /* prevent the PTSs from slowly drifting away in the generated
406 MPEG stream */
407 ret |= pvr2_encoder_vcmd(hdw, CX2341X_ENC_MISC, 2, 4, 1);
408
405 return ret; 409 return ret;
406} 410}
407 411
diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw.c b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
index 94265bd3d926..5b81ba469641 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
@@ -60,7 +60,6 @@ static struct pvr2_hdw *unit_pointers[PVR_NUM] = {[ 0 ... PVR_NUM-1 ] = NULL};
60static DEFINE_MUTEX(pvr2_unit_mtx); 60static DEFINE_MUTEX(pvr2_unit_mtx);
61 61
62static int ctlchg; 62static int ctlchg;
63static int initusbreset = 1;
64static int procreload; 63static int procreload;
65static int tuner[PVR_NUM] = { [0 ... PVR_NUM-1] = -1 }; 64static int tuner[PVR_NUM] = { [0 ... PVR_NUM-1] = -1 };
66static int tolerance[PVR_NUM] = { [0 ... PVR_NUM-1] = 0 }; 65static int tolerance[PVR_NUM] = { [0 ... PVR_NUM-1] = 0 };
@@ -71,8 +70,6 @@ module_param(ctlchg, int, S_IRUGO|S_IWUSR);
71MODULE_PARM_DESC(ctlchg, "0=optimize ctl change 1=always accept new ctl value"); 70MODULE_PARM_DESC(ctlchg, "0=optimize ctl change 1=always accept new ctl value");
72module_param(init_pause_msec, int, S_IRUGO|S_IWUSR); 71module_param(init_pause_msec, int, S_IRUGO|S_IWUSR);
73MODULE_PARM_DESC(init_pause_msec, "hardware initialization settling delay"); 72MODULE_PARM_DESC(init_pause_msec, "hardware initialization settling delay");
74module_param(initusbreset, int, S_IRUGO|S_IWUSR);
75MODULE_PARM_DESC(initusbreset, "Do USB reset device on probe");
76module_param(procreload, int, S_IRUGO|S_IWUSR); 73module_param(procreload, int, S_IRUGO|S_IWUSR);
77MODULE_PARM_DESC(procreload, 74MODULE_PARM_DESC(procreload,
78 "Attempt init failure recovery with firmware reload"); 75 "Attempt init failure recovery with firmware reload");
@@ -1967,9 +1964,6 @@ static void pvr2_hdw_setup_low(struct pvr2_hdw *hdw)
1967 } 1964 }
1968 hdw->fw1_state = FW1_STATE_OK; 1965 hdw->fw1_state = FW1_STATE_OK;
1969 1966
1970 if (initusbreset) {
1971 pvr2_hdw_device_reset(hdw);
1972 }
1973 if (!pvr2_hdw_dev_ok(hdw)) return; 1967 if (!pvr2_hdw_dev_ok(hdw)) return;
1974 1968
1975 for (idx = 0; idx < hdw->hdw_desc->client_modules.cnt; idx++) { 1969 for (idx = 0; idx < hdw->hdw_desc->client_modules.cnt; idx++) {
diff --git a/drivers/media/video/pvrusb2/pvrusb2-v4l2.c b/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
index f048d80b77e5..97ed95957992 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
@@ -168,7 +168,7 @@ static const char *get_v4l_name(int v4l_type)
168 * This is part of Video 4 Linux API. The procedure handles ioctl() calls. 168 * This is part of Video 4 Linux API. The procedure handles ioctl() calls.
169 * 169 *
170 */ 170 */
171static int pvr2_v4l2_do_ioctl(struct inode *inode, struct file *file, 171static int __pvr2_v4l2_do_ioctl(struct file *file,
172 unsigned int cmd, void *arg) 172 unsigned int cmd, void *arg)
173{ 173{
174 struct pvr2_v4l2_fh *fh = file->private_data; 174 struct pvr2_v4l2_fh *fh = file->private_data;
@@ -863,8 +863,8 @@ static int pvr2_v4l2_do_ioctl(struct inode *inode, struct file *file,
863#endif 863#endif
864 864
865 default : 865 default :
866 ret = v4l_compat_translate_ioctl(inode,file,cmd, 866 ret = v4l_compat_translate_ioctl(file, cmd,
867 arg,pvr2_v4l2_do_ioctl); 867 arg, __pvr2_v4l2_do_ioctl);
868 } 868 }
869 869
870 pvr2_hdw_commit_ctl(hdw); 870 pvr2_hdw_commit_ctl(hdw);
@@ -890,10 +890,15 @@ static int pvr2_v4l2_do_ioctl(struct inode *inode, struct file *file,
890 return ret; 890 return ret;
891} 891}
892 892
893static int pvr2_v4l2_do_ioctl(struct inode *inode, struct file *file,
894 unsigned int cmd, void *arg)
895{
896 return __pvr2_v4l2_do_ioctl(file, cmd, arg);
897}
893 898
894static void pvr2_v4l2_dev_destroy(struct pvr2_v4l2_dev *dip) 899static void pvr2_v4l2_dev_destroy(struct pvr2_v4l2_dev *dip)
895{ 900{
896 int minor_id = dip->devbase.minor; 901 int num = dip->devbase.num;
897 struct pvr2_hdw *hdw = dip->v4lp->channel.mc_head->hdw; 902 struct pvr2_hdw *hdw = dip->v4lp->channel.mc_head->hdw;
898 enum pvr2_config cfg = dip->config; 903 enum pvr2_config cfg = dip->config;
899 int v4l_type = dip->v4l_type; 904 int v4l_type = dip->v4l_type;
@@ -909,7 +914,7 @@ static void pvr2_v4l2_dev_destroy(struct pvr2_v4l2_dev *dip)
909 video_unregister_device(&dip->devbase); 914 video_unregister_device(&dip->devbase);
910 915
911 printk(KERN_INFO "pvrusb2: unregistered device %s%u [%s]\n", 916 printk(KERN_INFO "pvrusb2: unregistered device %s%u [%s]\n",
912 get_v4l_name(v4l_type),minor_id & 0x1f, 917 get_v4l_name(v4l_type), num,
913 pvr2_config_get_name(cfg)); 918 pvr2_config_get_name(cfg));
914 919
915} 920}
@@ -1310,7 +1315,7 @@ static void pvr2_v4l2_dev_init(struct pvr2_v4l2_dev *dip,
1310 } 1315 }
1311 1316
1312 printk(KERN_INFO "pvrusb2: registered device %s%u [%s]\n", 1317 printk(KERN_INFO "pvrusb2: registered device %s%u [%s]\n",
1313 get_v4l_name(dip->v4l_type),dip->devbase.minor & 0x1f, 1318 get_v4l_name(dip->v4l_type), dip->devbase.num,
1314 pvr2_config_get_name(dip->config)); 1319 pvr2_config_get_name(dip->config));
1315 1320
1316 pvr2_hdw_v4l_store_minor_number(vp->channel.mc_head->hdw, 1321 pvr2_hdw_v4l_store_minor_number(vp->channel.mc_head->hdw,
diff --git a/drivers/media/video/pwc/pwc-if.c b/drivers/media/video/pwc/pwc-if.c
index ab28389b4cda..f3897a3fdb75 100644
--- a/drivers/media/video/pwc/pwc-if.c
+++ b/drivers/media/video/pwc/pwc-if.c
@@ -1795,7 +1795,7 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
1795 goto err; 1795 goto err;
1796 } 1796 }
1797 else { 1797 else {
1798 PWC_INFO("Registered as /dev/video%d.\n", pdev->vdev->minor & 0x3F); 1798 PWC_INFO("Registered as /dev/video%d.\n", pdev->vdev->num);
1799 } 1799 }
1800 1800
1801 /* occupy slot */ 1801 /* occupy slot */
diff --git a/drivers/media/video/saa7134/saa7134-core.c b/drivers/media/video/saa7134/saa7134-core.c
index b686bfabbde0..249184452949 100644
--- a/drivers/media/video/saa7134/saa7134-core.c
+++ b/drivers/media/video/saa7134/saa7134-core.c
@@ -996,7 +996,7 @@ static int __devinit saa7134_initdev(struct pci_dev *pci_dev,
996 goto fail4; 996 goto fail4;
997 } 997 }
998 printk(KERN_INFO "%s: registered device video%d [v4l2]\n", 998 printk(KERN_INFO "%s: registered device video%d [v4l2]\n",
999 dev->name,dev->video_dev->minor & 0x1f); 999 dev->name, dev->video_dev->num);
1000 1000
1001 dev->vbi_dev = vdev_init(dev, &saa7134_video_template, "vbi"); 1001 dev->vbi_dev = vdev_init(dev, &saa7134_video_template, "vbi");
1002 1002
@@ -1005,7 +1005,7 @@ static int __devinit saa7134_initdev(struct pci_dev *pci_dev,
1005 if (err < 0) 1005 if (err < 0)
1006 goto fail4; 1006 goto fail4;
1007 printk(KERN_INFO "%s: registered device vbi%d\n", 1007 printk(KERN_INFO "%s: registered device vbi%d\n",
1008 dev->name,dev->vbi_dev->minor & 0x1f); 1008 dev->name, dev->vbi_dev->num);
1009 1009
1010 if (card_has_radio(dev)) { 1010 if (card_has_radio(dev)) {
1011 dev->radio_dev = vdev_init(dev,&saa7134_radio_template,"radio"); 1011 dev->radio_dev = vdev_init(dev,&saa7134_radio_template,"radio");
@@ -1014,7 +1014,7 @@ static int __devinit saa7134_initdev(struct pci_dev *pci_dev,
1014 if (err < 0) 1014 if (err < 0)
1015 goto fail4; 1015 goto fail4;
1016 printk(KERN_INFO "%s: registered device radio%d\n", 1016 printk(KERN_INFO "%s: registered device radio%d\n",
1017 dev->name,dev->radio_dev->minor & 0x1f); 1017 dev->name, dev->radio_dev->num);
1018 } 1018 }
1019 1019
1020 /* everything worked */ 1020 /* everything worked */
diff --git a/drivers/media/video/saa7134/saa7134-empress.c b/drivers/media/video/saa7134/saa7134-empress.c
index 9a8766a78a0c..7f40511bcc04 100644
--- a/drivers/media/video/saa7134/saa7134-empress.c
+++ b/drivers/media/video/saa7134/saa7134-empress.c
@@ -534,7 +534,7 @@ static int empress_init(struct saa7134_dev *dev)
534 return err; 534 return err;
535 } 535 }
536 printk(KERN_INFO "%s: registered device video%d [mpeg]\n", 536 printk(KERN_INFO "%s: registered device video%d [mpeg]\n",
537 dev->name,dev->empress_dev->minor & 0x1f); 537 dev->name, dev->empress_dev->num);
538 538
539 videobuf_queue_sg_init(&dev->empress_tsq, &saa7134_ts_qops, 539 videobuf_queue_sg_init(&dev->empress_tsq, &saa7134_ts_qops,
540 &dev->pci->dev, &dev->slock, 540 &dev->pci->dev, &dev->slock,
diff --git a/drivers/media/video/se401.c b/drivers/media/video/se401.c
index ae3949180c4e..044a2e94c34d 100644
--- a/drivers/media/video/se401.c
+++ b/drivers/media/video/se401.c
@@ -1412,7 +1412,7 @@ static int se401_probe(struct usb_interface *intf,
1412 return -EIO; 1412 return -EIO;
1413 } 1413 }
1414 dev_info(&intf->dev, "registered new video device: video%d\n", 1414 dev_info(&intf->dev, "registered new video device: video%d\n",
1415 se401->vdev.minor); 1415 se401->vdev.num);
1416 1416
1417 usb_set_intfdata (intf, se401); 1417 usb_set_intfdata (intf, se401);
1418 return 0; 1418 return 0;
diff --git a/drivers/media/video/sn9c102/sn9c102_core.c b/drivers/media/video/sn9c102/sn9c102_core.c
index 20e30bd9364b..fcd2b62f92c4 100644
--- a/drivers/media/video/sn9c102/sn9c102_core.c
+++ b/drivers/media/video/sn9c102/sn9c102_core.c
@@ -1008,7 +1008,7 @@ static int sn9c102_stream_interrupt(struct sn9c102_device* cam)
1008 cam->state |= DEV_MISCONFIGURED; 1008 cam->state |= DEV_MISCONFIGURED;
1009 DBG(1, "URB timeout reached. The camera is misconfigured. " 1009 DBG(1, "URB timeout reached. The camera is misconfigured. "
1010 "To use it, close and open /dev/video%d again.", 1010 "To use it, close and open /dev/video%d again.",
1011 cam->v4ldev->minor); 1011 cam->v4ldev->num);
1012 return -EIO; 1012 return -EIO;
1013 } 1013 }
1014 1014
@@ -1734,7 +1734,7 @@ static void sn9c102_release_resources(struct kref *kref)
1734 1734
1735 cam = container_of(kref, struct sn9c102_device, kref); 1735 cam = container_of(kref, struct sn9c102_device, kref);
1736 1736
1737 DBG(2, "V4L2 device /dev/video%d deregistered", cam->v4ldev->minor); 1737 DBG(2, "V4L2 device /dev/video%d deregistered", cam->v4ldev->num);
1738 video_set_drvdata(cam->v4ldev, NULL); 1738 video_set_drvdata(cam->v4ldev, NULL);
1739 video_unregister_device(cam->v4ldev); 1739 video_unregister_device(cam->v4ldev);
1740 usb_put_dev(cam->usbdev); 1740 usb_put_dev(cam->usbdev);
@@ -1792,7 +1792,7 @@ static int sn9c102_open(struct inode* inode, struct file* filp)
1792 1792
1793 if (cam->users) { 1793 if (cam->users) {
1794 DBG(2, "Device /dev/video%d is already in use", 1794 DBG(2, "Device /dev/video%d is already in use",
1795 cam->v4ldev->minor); 1795 cam->v4ldev->num);
1796 DBG(3, "Simultaneous opens are not supported"); 1796 DBG(3, "Simultaneous opens are not supported");
1797 /* 1797 /*
1798 open() must follow the open flags and should block 1798 open() must follow the open flags and should block
@@ -1845,7 +1845,7 @@ static int sn9c102_open(struct inode* inode, struct file* filp)
1845 cam->frame_count = 0; 1845 cam->frame_count = 0;
1846 sn9c102_empty_framequeues(cam); 1846 sn9c102_empty_framequeues(cam);
1847 1847
1848 DBG(3, "Video device /dev/video%d is open", cam->v4ldev->minor); 1848 DBG(3, "Video device /dev/video%d is open", cam->v4ldev->num);
1849 1849
1850out: 1850out:
1851 mutex_unlock(&cam->open_mutex); 1851 mutex_unlock(&cam->open_mutex);
@@ -1870,7 +1870,7 @@ static int sn9c102_release(struct inode* inode, struct file* filp)
1870 cam->users--; 1870 cam->users--;
1871 wake_up_interruptible_nr(&cam->wait_open, 1); 1871 wake_up_interruptible_nr(&cam->wait_open, 1);
1872 1872
1873 DBG(3, "Video device /dev/video%d closed", cam->v4ldev->minor); 1873 DBG(3, "Video device /dev/video%d closed", cam->v4ldev->num);
1874 1874
1875 kref_put(&cam->kref, sn9c102_release_resources); 1875 kref_put(&cam->kref, sn9c102_release_resources);
1876 1876
@@ -2432,7 +2432,7 @@ sn9c102_vidioc_s_crop(struct sn9c102_device* cam, void __user * arg)
2432 cam->state |= DEV_MISCONFIGURED; 2432 cam->state |= DEV_MISCONFIGURED;
2433 DBG(1, "VIDIOC_S_CROP failed because of hardware problems. To " 2433 DBG(1, "VIDIOC_S_CROP failed because of hardware problems. To "
2434 "use the camera, close and open /dev/video%d again.", 2434 "use the camera, close and open /dev/video%d again.",
2435 cam->v4ldev->minor); 2435 cam->v4ldev->num);
2436 return -EIO; 2436 return -EIO;
2437 } 2437 }
2438 2438
@@ -2445,7 +2445,7 @@ sn9c102_vidioc_s_crop(struct sn9c102_device* cam, void __user * arg)
2445 cam->state |= DEV_MISCONFIGURED; 2445 cam->state |= DEV_MISCONFIGURED;
2446 DBG(1, "VIDIOC_S_CROP failed because of not enough memory. To " 2446 DBG(1, "VIDIOC_S_CROP failed because of not enough memory. To "
2447 "use the camera, close and open /dev/video%d again.", 2447 "use the camera, close and open /dev/video%d again.",
2448 cam->v4ldev->minor); 2448 cam->v4ldev->num);
2449 return -ENOMEM; 2449 return -ENOMEM;
2450 } 2450 }
2451 2451
@@ -2689,7 +2689,7 @@ sn9c102_vidioc_try_s_fmt(struct sn9c102_device* cam, unsigned int cmd,
2689 cam->state |= DEV_MISCONFIGURED; 2689 cam->state |= DEV_MISCONFIGURED;
2690 DBG(1, "VIDIOC_S_FMT failed because of hardware problems. To " 2690 DBG(1, "VIDIOC_S_FMT failed because of hardware problems. To "
2691 "use the camera, close and open /dev/video%d again.", 2691 "use the camera, close and open /dev/video%d again.",
2692 cam->v4ldev->minor); 2692 cam->v4ldev->num);
2693 return -EIO; 2693 return -EIO;
2694 } 2694 }
2695 2695
@@ -2701,7 +2701,7 @@ sn9c102_vidioc_try_s_fmt(struct sn9c102_device* cam, unsigned int cmd,
2701 cam->state |= DEV_MISCONFIGURED; 2701 cam->state |= DEV_MISCONFIGURED;
2702 DBG(1, "VIDIOC_S_FMT failed because of not enough memory. To " 2702 DBG(1, "VIDIOC_S_FMT failed because of not enough memory. To "
2703 "use the camera, close and open /dev/video%d again.", 2703 "use the camera, close and open /dev/video%d again.",
2704 cam->v4ldev->minor); 2704 cam->v4ldev->num);
2705 return -ENOMEM; 2705 return -ENOMEM;
2706 } 2706 }
2707 2707
@@ -2748,7 +2748,7 @@ sn9c102_vidioc_s_jpegcomp(struct sn9c102_device* cam, void __user * arg)
2748 cam->state |= DEV_MISCONFIGURED; 2748 cam->state |= DEV_MISCONFIGURED;
2749 DBG(1, "VIDIOC_S_JPEGCOMP failed because of hardware " 2749 DBG(1, "VIDIOC_S_JPEGCOMP failed because of hardware "
2750 "problems. To use the camera, close and open " 2750 "problems. To use the camera, close and open "
2751 "/dev/video%d again.", cam->v4ldev->minor); 2751 "/dev/video%d again.", cam->v4ldev->num);
2752 return -EIO; 2752 return -EIO;
2753 } 2753 }
2754 2754
@@ -3348,7 +3348,7 @@ sn9c102_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
3348 goto fail; 3348 goto fail;
3349 } 3349 }
3350 3350
3351 DBG(2, "V4L2 device registered as /dev/video%d", cam->v4ldev->minor); 3351 DBG(2, "V4L2 device registered as /dev/video%d", cam->v4ldev->num);
3352 3352
3353 video_set_drvdata(cam->v4ldev, cam); 3353 video_set_drvdata(cam->v4ldev, cam);
3354 cam->module_param.force_munmap = force_munmap[dev_nr]; 3354 cam->module_param.force_munmap = force_munmap[dev_nr];
@@ -3402,7 +3402,7 @@ static void sn9c102_usb_disconnect(struct usb_interface* intf)
3402 if (cam->users) { 3402 if (cam->users) {
3403 DBG(2, "Device /dev/video%d is open! Deregistration and " 3403 DBG(2, "Device /dev/video%d is open! Deregistration and "
3404 "memory deallocation are deferred.", 3404 "memory deallocation are deferred.",
3405 cam->v4ldev->minor); 3405 cam->v4ldev->num);
3406 cam->state |= DEV_MISCONFIGURED; 3406 cam->state |= DEV_MISCONFIGURED;
3407 sn9c102_stop_transfer(cam); 3407 sn9c102_stop_transfer(cam);
3408 cam->state |= DEV_DISCONNECTED; 3408 cam->state |= DEV_DISCONNECTED;
diff --git a/drivers/media/video/stk-webcam.c b/drivers/media/video/stk-webcam.c
index edaea4964513..e9eb6d754d5c 100644
--- a/drivers/media/video/stk-webcam.c
+++ b/drivers/media/video/stk-webcam.c
@@ -1331,7 +1331,7 @@ static int stk_register_video_device(struct stk_camera *dev)
1331 STK_ERROR("v4l registration failed\n"); 1331 STK_ERROR("v4l registration failed\n");
1332 else 1332 else
1333 STK_INFO("Syntek USB2.0 Camera is now controlling video device" 1333 STK_INFO("Syntek USB2.0 Camera is now controlling video device"
1334 " /dev/video%d\n", dev->vdev.minor); 1334 " /dev/video%d\n", dev->vdev.num);
1335 return err; 1335 return err;
1336} 1336}
1337 1337
@@ -1426,7 +1426,7 @@ static void stk_camera_disconnect(struct usb_interface *interface)
1426 stk_remove_sysfs_files(&dev->vdev); 1426 stk_remove_sysfs_files(&dev->vdev);
1427 1427
1428 STK_INFO("Syntek USB2.0 Camera release resources " 1428 STK_INFO("Syntek USB2.0 Camera release resources "
1429 "video device /dev/video%d\n", dev->vdev.minor); 1429 "video device /dev/video%d\n", dev->vdev.num);
1430 1430
1431 video_unregister_device(&dev->vdev); 1431 video_unregister_device(&dev->vdev);
1432} 1432}
diff --git a/drivers/media/video/stv680.c b/drivers/media/video/stv680.c
index 9c549d935994..328c41b1517d 100644
--- a/drivers/media/video/stv680.c
+++ b/drivers/media/video/stv680.c
@@ -1470,7 +1470,8 @@ static int stv680_probe (struct usb_interface *intf, const struct usb_device_id
1470 retval = -EIO; 1470 retval = -EIO;
1471 goto error_vdev; 1471 goto error_vdev;
1472 } 1472 }
1473 PDEBUG (0, "STV(i): registered new video device: video%d", stv680->vdev->minor); 1473 PDEBUG(0, "STV(i): registered new video device: video%d",
1474 stv680->vdev->num);
1474 1475
1475 usb_set_intfdata (intf, stv680); 1476 usb_set_intfdata (intf, stv680);
1476 retval = stv680_create_sysfs_files(stv680->vdev); 1477 retval = stv680_create_sysfs_files(stv680->vdev);
diff --git a/drivers/media/video/usbvideo/usbvideo.c b/drivers/media/video/usbvideo/usbvideo.c
index 07cd87d16f69..7c575bb8184f 100644
--- a/drivers/media/video/usbvideo/usbvideo.c
+++ b/drivers/media/video/usbvideo/usbvideo.c
@@ -1059,7 +1059,7 @@ int usbvideo_RegisterVideoDevice(struct uvd *uvd)
1059 1059
1060 dev_info(&uvd->dev->dev, "%s on /dev/video%d: canvas=%s videosize=%s\n", 1060 dev_info(&uvd->dev->dev, "%s on /dev/video%d: canvas=%s videosize=%s\n",
1061 (uvd->handle != NULL) ? uvd->handle->drvName : "???", 1061 (uvd->handle != NULL) ? uvd->handle->drvName : "???",
1062 uvd->vdev.minor, tmp2, tmp1); 1062 uvd->vdev.num, tmp2, tmp1);
1063 1063
1064 usb_get_dev(uvd->dev); 1064 usb_get_dev(uvd->dev);
1065 return 0; 1065 return 0;
diff --git a/drivers/media/video/usbvideo/vicam.c b/drivers/media/video/usbvideo/vicam.c
index 7a127d6bfdee..8e2d58bec481 100644
--- a/drivers/media/video/usbvideo/vicam.c
+++ b/drivers/media/video/usbvideo/vicam.c
@@ -877,7 +877,8 @@ vicam_probe( struct usb_interface *intf, const struct usb_device_id *id)
877 return -EIO; 877 return -EIO;
878 } 878 }
879 879
880 printk(KERN_INFO "ViCam webcam driver now controlling video device %d\n",cam->vdev.minor); 880 printk(KERN_INFO "ViCam webcam driver now controlling video device %d\n",
881 cam->vdev.num);
881 882
882 usb_set_intfdata (intf, cam); 883 usb_set_intfdata (intf, cam);
883 884
diff --git a/drivers/media/video/usbvision/usbvision-i2c.c b/drivers/media/video/usbvision/usbvision-i2c.c
index 92427fdc1459..9907b9aff2b9 100644
--- a/drivers/media/video/usbvision/usbvision-i2c.c
+++ b/drivers/media/video/usbvision/usbvision-i2c.c
@@ -236,7 +236,7 @@ int usbvision_i2c_register(struct usb_usbvision *usbvision)
236 sizeof(struct i2c_client)); 236 sizeof(struct i2c_client));
237 237
238 sprintf(usbvision->i2c_adap.name + strlen(usbvision->i2c_adap.name), 238 sprintf(usbvision->i2c_adap.name + strlen(usbvision->i2c_adap.name),
239 " #%d", usbvision->vdev->minor & 0x1f); 239 " #%d", usbvision->vdev->num);
240 PDEBUG(DBG_I2C,"Adaptername: %s", usbvision->i2c_adap.name); 240 PDEBUG(DBG_I2C,"Adaptername: %s", usbvision->i2c_adap.name);
241 usbvision->i2c_adap.dev.parent = &usbvision->dev->dev; 241 usbvision->i2c_adap.dev.parent = &usbvision->dev->dev;
242 242
diff --git a/drivers/media/video/usbvision/usbvision-video.c b/drivers/media/video/usbvision/usbvision-video.c
index 77aeb39b2750..d185b57fdcd0 100644
--- a/drivers/media/video/usbvision/usbvision-video.c
+++ b/drivers/media/video/usbvision/usbvision-video.c
@@ -1440,7 +1440,7 @@ static void usbvision_unregister_video(struct usb_usbvision *usbvision)
1440 // vbi Device: 1440 // vbi Device:
1441 if (usbvision->vbi) { 1441 if (usbvision->vbi) {
1442 PDEBUG(DBG_PROBE, "unregister /dev/vbi%d [v4l2]", 1442 PDEBUG(DBG_PROBE, "unregister /dev/vbi%d [v4l2]",
1443 usbvision->vbi->minor & 0x1f); 1443 usbvision->vbi->num);
1444 if (usbvision->vbi->minor != -1) { 1444 if (usbvision->vbi->minor != -1) {
1445 video_unregister_device(usbvision->vbi); 1445 video_unregister_device(usbvision->vbi);
1446 } else { 1446 } else {
@@ -1452,7 +1452,7 @@ static void usbvision_unregister_video(struct usb_usbvision *usbvision)
1452 // Radio Device: 1452 // Radio Device:
1453 if (usbvision->rdev) { 1453 if (usbvision->rdev) {
1454 PDEBUG(DBG_PROBE, "unregister /dev/radio%d [v4l2]", 1454 PDEBUG(DBG_PROBE, "unregister /dev/radio%d [v4l2]",
1455 usbvision->rdev->minor & 0x1f); 1455 usbvision->rdev->num);
1456 if (usbvision->rdev->minor != -1) { 1456 if (usbvision->rdev->minor != -1) {
1457 video_unregister_device(usbvision->rdev); 1457 video_unregister_device(usbvision->rdev);
1458 } else { 1458 } else {
@@ -1464,7 +1464,7 @@ static void usbvision_unregister_video(struct usb_usbvision *usbvision)
1464 // Video Device: 1464 // Video Device:
1465 if (usbvision->vdev) { 1465 if (usbvision->vdev) {
1466 PDEBUG(DBG_PROBE, "unregister /dev/video%d [v4l2]", 1466 PDEBUG(DBG_PROBE, "unregister /dev/video%d [v4l2]",
1467 usbvision->vdev->minor & 0x1f); 1467 usbvision->vdev->num);
1468 if (usbvision->vdev->minor != -1) { 1468 if (usbvision->vdev->minor != -1) {
1469 video_unregister_device(usbvision->vdev); 1469 video_unregister_device(usbvision->vdev);
1470 } else { 1470 } else {
@@ -1490,7 +1490,7 @@ static int __devinit usbvision_register_video(struct usb_usbvision *usbvision)
1490 goto err_exit; 1490 goto err_exit;
1491 } 1491 }
1492 printk(KERN_INFO "USBVision[%d]: registered USBVision Video device /dev/video%d [v4l2]\n", 1492 printk(KERN_INFO "USBVision[%d]: registered USBVision Video device /dev/video%d [v4l2]\n",
1493 usbvision->nr,usbvision->vdev->minor & 0x1f); 1493 usbvision->nr, usbvision->vdev->num);
1494 1494
1495 // Radio Device: 1495 // Radio Device:
1496 if (usbvision_device_data[usbvision->DevModel].Radio) { 1496 if (usbvision_device_data[usbvision->DevModel].Radio) {
@@ -1507,7 +1507,7 @@ static int __devinit usbvision_register_video(struct usb_usbvision *usbvision)
1507 goto err_exit; 1507 goto err_exit;
1508 } 1508 }
1509 printk(KERN_INFO "USBVision[%d]: registered USBVision Radio device /dev/radio%d [v4l2]\n", 1509 printk(KERN_INFO "USBVision[%d]: registered USBVision Radio device /dev/radio%d [v4l2]\n",
1510 usbvision->nr, usbvision->rdev->minor & 0x1f); 1510 usbvision->nr, usbvision->rdev->num);
1511 } 1511 }
1512 // vbi Device: 1512 // vbi Device:
1513 if (usbvision_device_data[usbvision->DevModel].vbi) { 1513 if (usbvision_device_data[usbvision->DevModel].vbi) {
@@ -1523,7 +1523,7 @@ static int __devinit usbvision_register_video(struct usb_usbvision *usbvision)
1523 goto err_exit; 1523 goto err_exit;
1524 } 1524 }
1525 printk(KERN_INFO "USBVision[%d]: registered USBVision VBI device /dev/vbi%d [v4l2] (Not Working Yet!)\n", 1525 printk(KERN_INFO "USBVision[%d]: registered USBVision VBI device /dev/vbi%d [v4l2] (Not Working Yet!)\n",
1526 usbvision->nr,usbvision->vbi->minor & 0x1f); 1526 usbvision->nr, usbvision->vbi->num);
1527 } 1527 }
1528 // all done 1528 // all done
1529 return 0; 1529 return 0;
diff --git a/drivers/media/video/uvc/uvc_v4l2.c b/drivers/media/video/uvc/uvc_v4l2.c
index 78e4c4e09d89..758dfefaba8d 100644
--- a/drivers/media/video/uvc/uvc_v4l2.c
+++ b/drivers/media/video/uvc/uvc_v4l2.c
@@ -464,7 +464,7 @@ static int uvc_v4l2_release(struct inode *inode, struct file *file)
464 return 0; 464 return 0;
465} 465}
466 466
467static int uvc_v4l2_do_ioctl(struct inode *inode, struct file *file, 467static int __uvc_v4l2_do_ioctl(struct file *file,
468 unsigned int cmd, void *arg) 468 unsigned int cmd, void *arg)
469{ 469{
470 struct video_device *vdev = video_devdata(file); 470 struct video_device *vdev = video_devdata(file);
@@ -978,8 +978,8 @@ static int uvc_v4l2_do_ioctl(struct inode *inode, struct file *file,
978 return uvc_xu_ctrl_query(video, arg, 1); 978 return uvc_xu_ctrl_query(video, arg, 1);
979 979
980 default: 980 default:
981 if ((ret = v4l_compat_translate_ioctl(inode, file, cmd, arg, 981 if ((ret = v4l_compat_translate_ioctl(file, cmd, arg,
982 uvc_v4l2_do_ioctl)) == -ENOIOCTLCMD) 982 __uvc_v4l2_do_ioctl)) == -ENOIOCTLCMD)
983 uvc_trace(UVC_TRACE_IOCTL, "Unknown ioctl 0x%08x\n", 983 uvc_trace(UVC_TRACE_IOCTL, "Unknown ioctl 0x%08x\n",
984 cmd); 984 cmd);
985 return ret; 985 return ret;
@@ -988,6 +988,12 @@ static int uvc_v4l2_do_ioctl(struct inode *inode, struct file *file,
988 return ret; 988 return ret;
989} 989}
990 990
991static int uvc_v4l2_do_ioctl(struct inode *inode, struct file *file,
992 unsigned int cmd, void *arg)
993{
994 return __uvc_v4l2_do_ioctl(file, cmd, arg);
995}
996
991static int uvc_v4l2_ioctl(struct inode *inode, struct file *file, 997static int uvc_v4l2_ioctl(struct inode *inode, struct file *file,
992 unsigned int cmd, unsigned long arg) 998 unsigned int cmd, unsigned long arg)
993{ 999{
diff --git a/drivers/media/video/v4l1-compat.c b/drivers/media/video/v4l1-compat.c
index 928cb4037372..f13c0a9d684f 100644
--- a/drivers/media/video/v4l1-compat.c
+++ b/drivers/media/video/v4l1-compat.c
@@ -57,8 +57,7 @@ MODULE_LICENSE("GPL");
57 */ 57 */
58 58
59static int 59static int
60get_v4l_control(struct inode *inode, 60get_v4l_control(struct file *file,
61 struct file *file,
62 int cid, 61 int cid,
63 v4l2_kioctl drv) 62 v4l2_kioctl drv)
64{ 63{
@@ -67,12 +66,12 @@ get_v4l_control(struct inode *inode,
67 int err; 66 int err;
68 67
69 qctrl2.id = cid; 68 qctrl2.id = cid;
70 err = drv(inode, file, VIDIOC_QUERYCTRL, &qctrl2); 69 err = drv(file, VIDIOC_QUERYCTRL, &qctrl2);
71 if (err < 0) 70 if (err < 0)
72 dprintk("VIDIOC_QUERYCTRL: %d\n", err); 71 dprintk("VIDIOC_QUERYCTRL: %d\n", err);
73 if (err == 0 && !(qctrl2.flags & V4L2_CTRL_FLAG_DISABLED)) { 72 if (err == 0 && !(qctrl2.flags & V4L2_CTRL_FLAG_DISABLED)) {
74 ctrl2.id = qctrl2.id; 73 ctrl2.id = qctrl2.id;
75 err = drv(inode, file, VIDIOC_G_CTRL, &ctrl2); 74 err = drv(file, VIDIOC_G_CTRL, &ctrl2);
76 if (err < 0) { 75 if (err < 0) {
77 dprintk("VIDIOC_G_CTRL: %d\n", err); 76 dprintk("VIDIOC_G_CTRL: %d\n", err);
78 return 0; 77 return 0;
@@ -85,8 +84,7 @@ get_v4l_control(struct inode *inode,
85} 84}
86 85
87static int 86static int
88set_v4l_control(struct inode *inode, 87set_v4l_control(struct file *file,
89 struct file *file,
90 int cid, 88 int cid,
91 int value, 89 int value,
92 v4l2_kioctl drv) 90 v4l2_kioctl drv)
@@ -96,7 +94,7 @@ set_v4l_control(struct inode *inode,
96 int err; 94 int err;
97 95
98 qctrl2.id = cid; 96 qctrl2.id = cid;
99 err = drv(inode, file, VIDIOC_QUERYCTRL, &qctrl2); 97 err = drv(file, VIDIOC_QUERYCTRL, &qctrl2);
100 if (err < 0) 98 if (err < 0)
101 dprintk("VIDIOC_QUERYCTRL: %d\n", err); 99 dprintk("VIDIOC_QUERYCTRL: %d\n", err);
102 if (err == 0 && 100 if (err == 0 &&
@@ -114,7 +112,7 @@ set_v4l_control(struct inode *inode,
114 + 32767) 112 + 32767)
115 / 65535; 113 / 65535;
116 ctrl2.value += qctrl2.minimum; 114 ctrl2.value += qctrl2.minimum;
117 err = drv(inode, file, VIDIOC_S_CTRL, &ctrl2); 115 err = drv(file, VIDIOC_S_CTRL, &ctrl2);
118 if (err < 0) 116 if (err < 0)
119 dprintk("VIDIOC_S_CTRL: %d\n", err); 117 dprintk("VIDIOC_S_CTRL: %d\n", err);
120 } 118 }
@@ -222,7 +220,6 @@ static int poll_one(struct file *file, struct poll_wqueues *pwq)
222} 220}
223 221
224static int count_inputs( 222static int count_inputs(
225 struct inode *inode,
226 struct file *file, 223 struct file *file,
227 v4l2_kioctl drv) 224 v4l2_kioctl drv)
228{ 225{
@@ -232,14 +229,13 @@ static int count_inputs(
232 for (i = 0;; i++) { 229 for (i = 0;; i++) {
233 memset(&input2, 0, sizeof(input2)); 230 memset(&input2, 0, sizeof(input2));
234 input2.index = i; 231 input2.index = i;
235 if (0 != drv(inode, file, VIDIOC_ENUMINPUT, &input2)) 232 if (0 != drv(file, VIDIOC_ENUMINPUT, &input2))
236 break; 233 break;
237 } 234 }
238 return i; 235 return i;
239} 236}
240 237
241static int check_size( 238static int check_size(
242 struct inode *inode,
243 struct file *file, 239 struct file *file,
244 v4l2_kioctl drv, 240 v4l2_kioctl drv,
245 int *maxw, 241 int *maxw,
@@ -252,14 +248,14 @@ static int check_size(
252 memset(&fmt2, 0, sizeof(fmt2)); 248 memset(&fmt2, 0, sizeof(fmt2));
253 249
254 desc2.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; 250 desc2.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
255 if (0 != drv(inode, file, VIDIOC_ENUM_FMT, &desc2)) 251 if (0 != drv(file, VIDIOC_ENUM_FMT, &desc2))
256 goto done; 252 goto done;
257 253
258 fmt2.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; 254 fmt2.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
259 fmt2.fmt.pix.width = 10000; 255 fmt2.fmt.pix.width = 10000;
260 fmt2.fmt.pix.height = 10000; 256 fmt2.fmt.pix.height = 10000;
261 fmt2.fmt.pix.pixelformat = desc2.pixelformat; 257 fmt2.fmt.pix.pixelformat = desc2.pixelformat;
262 if (0 != drv(inode, file, VIDIOC_TRY_FMT, &fmt2)) 258 if (0 != drv(file, VIDIOC_TRY_FMT, &fmt2))
263 goto done; 259 goto done;
264 260
265 *maxw = fmt2.fmt.pix.width; 261 *maxw = fmt2.fmt.pix.width;
@@ -273,7 +269,6 @@ done:
273 269
274static noinline int v4l1_compat_get_capabilities( 270static noinline int v4l1_compat_get_capabilities(
275 struct video_capability *cap, 271 struct video_capability *cap,
276 struct inode *inode,
277 struct file *file, 272 struct file *file,
278 v4l2_kioctl drv) 273 v4l2_kioctl drv)
279{ 274{
@@ -289,13 +284,13 @@ static noinline int v4l1_compat_get_capabilities(
289 memset(cap, 0, sizeof(*cap)); 284 memset(cap, 0, sizeof(*cap));
290 memset(&fbuf, 0, sizeof(fbuf)); 285 memset(&fbuf, 0, sizeof(fbuf));
291 286
292 err = drv(inode, file, VIDIOC_QUERYCAP, cap2); 287 err = drv(file, VIDIOC_QUERYCAP, cap2);
293 if (err < 0) { 288 if (err < 0) {
294 dprintk("VIDIOCGCAP / VIDIOC_QUERYCAP: %d\n", err); 289 dprintk("VIDIOCGCAP / VIDIOC_QUERYCAP: %d\n", err);
295 goto done; 290 goto done;
296 } 291 }
297 if (cap2->capabilities & V4L2_CAP_VIDEO_OVERLAY) { 292 if (cap2->capabilities & V4L2_CAP_VIDEO_OVERLAY) {
298 err = drv(inode, file, VIDIOC_G_FBUF, &fbuf); 293 err = drv(file, VIDIOC_G_FBUF, &fbuf);
299 if (err < 0) { 294 if (err < 0) {
300 dprintk("VIDIOCGCAP / VIDIOC_G_FBUF: %d\n", err); 295 dprintk("VIDIOCGCAP / VIDIOC_G_FBUF: %d\n", err);
301 memset(&fbuf, 0, sizeof(fbuf)); 296 memset(&fbuf, 0, sizeof(fbuf));
@@ -317,8 +312,8 @@ static noinline int v4l1_compat_get_capabilities(
317 if (fbuf.capability & V4L2_FBUF_CAP_LIST_CLIPPING) 312 if (fbuf.capability & V4L2_FBUF_CAP_LIST_CLIPPING)
318 cap->type |= VID_TYPE_CLIPPING; 313 cap->type |= VID_TYPE_CLIPPING;
319 314
320 cap->channels = count_inputs(inode, file, drv); 315 cap->channels = count_inputs(file, drv);
321 check_size(inode, file, drv, 316 check_size(file, drv,
322 &cap->maxwidth, &cap->maxheight); 317 &cap->maxwidth, &cap->maxheight);
323 cap->audios = 0; /* FIXME */ 318 cap->audios = 0; /* FIXME */
324 cap->minwidth = 48; /* FIXME */ 319 cap->minwidth = 48; /* FIXME */
@@ -331,7 +326,6 @@ done:
331 326
332static noinline int v4l1_compat_get_frame_buffer( 327static noinline int v4l1_compat_get_frame_buffer(
333 struct video_buffer *buffer, 328 struct video_buffer *buffer,
334 struct inode *inode,
335 struct file *file, 329 struct file *file,
336 v4l2_kioctl drv) 330 v4l2_kioctl drv)
337{ 331{
@@ -341,7 +335,7 @@ static noinline int v4l1_compat_get_frame_buffer(
341 memset(buffer, 0, sizeof(*buffer)); 335 memset(buffer, 0, sizeof(*buffer));
342 memset(&fbuf, 0, sizeof(fbuf)); 336 memset(&fbuf, 0, sizeof(fbuf));
343 337
344 err = drv(inode, file, VIDIOC_G_FBUF, &fbuf); 338 err = drv(file, VIDIOC_G_FBUF, &fbuf);
345 if (err < 0) { 339 if (err < 0) {
346 dprintk("VIDIOCGFBUF / VIDIOC_G_FBUF: %d\n", err); 340 dprintk("VIDIOCGFBUF / VIDIOC_G_FBUF: %d\n", err);
347 goto done; 341 goto done;
@@ -386,7 +380,6 @@ done:
386 380
387static noinline int v4l1_compat_set_frame_buffer( 381static noinline int v4l1_compat_set_frame_buffer(
388 struct video_buffer *buffer, 382 struct video_buffer *buffer,
389 struct inode *inode,
390 struct file *file, 383 struct file *file,
391 v4l2_kioctl drv) 384 v4l2_kioctl drv)
392{ 385{
@@ -415,7 +408,7 @@ static noinline int v4l1_compat_set_frame_buffer(
415 break; 408 break;
416 } 409 }
417 fbuf.fmt.bytesperline = buffer->bytesperline; 410 fbuf.fmt.bytesperline = buffer->bytesperline;
418 err = drv(inode, file, VIDIOC_S_FBUF, &fbuf); 411 err = drv(file, VIDIOC_S_FBUF, &fbuf);
419 if (err < 0) 412 if (err < 0)
420 dprintk("VIDIOCSFBUF / VIDIOC_S_FBUF: %d\n", err); 413 dprintk("VIDIOCSFBUF / VIDIOC_S_FBUF: %d\n", err);
421 return err; 414 return err;
@@ -423,7 +416,6 @@ static noinline int v4l1_compat_set_frame_buffer(
423 416
424static noinline int v4l1_compat_get_win_cap_dimensions( 417static noinline int v4l1_compat_get_win_cap_dimensions(
425 struct video_window *win, 418 struct video_window *win,
426 struct inode *inode,
427 struct file *file, 419 struct file *file,
428 v4l2_kioctl drv) 420 v4l2_kioctl drv)
429{ 421{
@@ -438,7 +430,7 @@ static noinline int v4l1_compat_get_win_cap_dimensions(
438 memset(win, 0, sizeof(*win)); 430 memset(win, 0, sizeof(*win));
439 431
440 fmt->type = V4L2_BUF_TYPE_VIDEO_OVERLAY; 432 fmt->type = V4L2_BUF_TYPE_VIDEO_OVERLAY;
441 err = drv(inode, file, VIDIOC_G_FMT, fmt); 433 err = drv(file, VIDIOC_G_FMT, fmt);
442 if (err < 0) 434 if (err < 0)
443 dprintk("VIDIOCGWIN / VIDIOC_G_WIN: %d\n", err); 435 dprintk("VIDIOCGWIN / VIDIOC_G_WIN: %d\n", err);
444 if (err == 0) { 436 if (err == 0) {
@@ -453,7 +445,7 @@ static noinline int v4l1_compat_get_win_cap_dimensions(
453 } 445 }
454 446
455 fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; 447 fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
456 err = drv(inode, file, VIDIOC_G_FMT, fmt); 448 err = drv(file, VIDIOC_G_FMT, fmt);
457 if (err < 0) { 449 if (err < 0) {
458 dprintk("VIDIOCGWIN / VIDIOC_G_FMT: %d\n", err); 450 dprintk("VIDIOCGWIN / VIDIOC_G_FMT: %d\n", err);
459 goto done; 451 goto done;
@@ -472,7 +464,6 @@ done:
472 464
473static noinline int v4l1_compat_set_win_cap_dimensions( 465static noinline int v4l1_compat_set_win_cap_dimensions(
474 struct video_window *win, 466 struct video_window *win,
475 struct inode *inode,
476 struct file *file, 467 struct file *file,
477 v4l2_kioctl drv) 468 v4l2_kioctl drv)
478{ 469{
@@ -485,8 +476,8 @@ static noinline int v4l1_compat_set_win_cap_dimensions(
485 return err; 476 return err;
486 } 477 }
487 fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; 478 fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
488 drv(inode, file, VIDIOC_STREAMOFF, &fmt->type); 479 drv(file, VIDIOC_STREAMOFF, &fmt->type);
489 err1 = drv(inode, file, VIDIOC_G_FMT, fmt); 480 err1 = drv(file, VIDIOC_G_FMT, fmt);
490 if (err1 < 0) 481 if (err1 < 0)
491 dprintk("VIDIOCSWIN / VIDIOC_G_FMT: %d\n", err1); 482 dprintk("VIDIOCSWIN / VIDIOC_G_FMT: %d\n", err1);
492 if (err1 == 0) { 483 if (err1 == 0) {
@@ -494,7 +485,7 @@ static noinline int v4l1_compat_set_win_cap_dimensions(
494 fmt->fmt.pix.height = win->height; 485 fmt->fmt.pix.height = win->height;
495 fmt->fmt.pix.field = V4L2_FIELD_ANY; 486 fmt->fmt.pix.field = V4L2_FIELD_ANY;
496 fmt->fmt.pix.bytesperline = 0; 487 fmt->fmt.pix.bytesperline = 0;
497 err = drv(inode, file, VIDIOC_S_FMT, fmt); 488 err = drv(file, VIDIOC_S_FMT, fmt);
498 if (err < 0) 489 if (err < 0)
499 dprintk("VIDIOCSWIN / VIDIOC_S_FMT #1: %d\n", 490 dprintk("VIDIOCSWIN / VIDIOC_S_FMT #1: %d\n",
500 err); 491 err);
@@ -511,7 +502,7 @@ static noinline int v4l1_compat_set_win_cap_dimensions(
511 fmt->fmt.win.chromakey = win->chromakey; 502 fmt->fmt.win.chromakey = win->chromakey;
512 fmt->fmt.win.clips = (void __user *)win->clips; 503 fmt->fmt.win.clips = (void __user *)win->clips;
513 fmt->fmt.win.clipcount = win->clipcount; 504 fmt->fmt.win.clipcount = win->clipcount;
514 err2 = drv(inode, file, VIDIOC_S_FMT, fmt); 505 err2 = drv(file, VIDIOC_S_FMT, fmt);
515 if (err2 < 0) 506 if (err2 < 0)
516 dprintk("VIDIOCSWIN / VIDIOC_S_FMT #2: %d\n", err2); 507 dprintk("VIDIOCSWIN / VIDIOC_S_FMT #2: %d\n", err2);
517 508
@@ -525,7 +516,6 @@ static noinline int v4l1_compat_set_win_cap_dimensions(
525 516
526static noinline int v4l1_compat_turn_preview_on_off( 517static noinline int v4l1_compat_turn_preview_on_off(
527 int *on, 518 int *on,
528 struct inode *inode,
529 struct file *file, 519 struct file *file,
530 v4l2_kioctl drv) 520 v4l2_kioctl drv)
531{ 521{
@@ -536,9 +526,9 @@ static noinline int v4l1_compat_turn_preview_on_off(
536 /* dirty hack time. But v4l1 has no STREAMOFF 526 /* dirty hack time. But v4l1 has no STREAMOFF
537 * equivalent in the API, and this one at 527 * equivalent in the API, and this one at
538 * least comes close ... */ 528 * least comes close ... */
539 drv(inode, file, VIDIOC_STREAMOFF, &captype); 529 drv(file, VIDIOC_STREAMOFF, &captype);
540 } 530 }
541 err = drv(inode, file, VIDIOC_OVERLAY, on); 531 err = drv(file, VIDIOC_OVERLAY, on);
542 if (err < 0) 532 if (err < 0)
543 dprintk("VIDIOCCAPTURE / VIDIOC_PREVIEW: %d\n", err); 533 dprintk("VIDIOCCAPTURE / VIDIOC_PREVIEW: %d\n", err);
544 return err; 534 return err;
@@ -546,7 +536,6 @@ static noinline int v4l1_compat_turn_preview_on_off(
546 536
547static noinline int v4l1_compat_get_input_info( 537static noinline int v4l1_compat_get_input_info(
548 struct video_channel *chan, 538 struct video_channel *chan,
549 struct inode *inode,
550 struct file *file, 539 struct file *file,
551 v4l2_kioctl drv) 540 v4l2_kioctl drv)
552{ 541{
@@ -556,7 +545,7 @@ static noinline int v4l1_compat_get_input_info(
556 545
557 memset(&input2, 0, sizeof(input2)); 546 memset(&input2, 0, sizeof(input2));
558 input2.index = chan->channel; 547 input2.index = chan->channel;
559 err = drv(inode, file, VIDIOC_ENUMINPUT, &input2); 548 err = drv(file, VIDIOC_ENUMINPUT, &input2);
560 if (err < 0) { 549 if (err < 0) {
561 dprintk("VIDIOCGCHAN / VIDIOC_ENUMINPUT: " 550 dprintk("VIDIOCGCHAN / VIDIOC_ENUMINPUT: "
562 "channel=%d err=%d\n", chan->channel, err); 551 "channel=%d err=%d\n", chan->channel, err);
@@ -578,7 +567,7 @@ static noinline int v4l1_compat_get_input_info(
578 break; 567 break;
579 } 568 }
580 chan->norm = 0; 569 chan->norm = 0;
581 err = drv(inode, file, VIDIOC_G_STD, &sid); 570 err = drv(file, VIDIOC_G_STD, &sid);
582 if (err < 0) 571 if (err < 0)
583 dprintk("VIDIOCGCHAN / VIDIOC_G_STD: %d\n", err); 572 dprintk("VIDIOCGCHAN / VIDIOC_G_STD: %d\n", err);
584 if (err == 0) { 573 if (err == 0) {
@@ -595,14 +584,13 @@ done:
595 584
596static noinline int v4l1_compat_set_input( 585static noinline int v4l1_compat_set_input(
597 struct video_channel *chan, 586 struct video_channel *chan,
598 struct inode *inode,
599 struct file *file, 587 struct file *file,
600 v4l2_kioctl drv) 588 v4l2_kioctl drv)
601{ 589{
602 int err; 590 int err;
603 v4l2_std_id sid = 0; 591 v4l2_std_id sid = 0;
604 592
605 err = drv(inode, file, VIDIOC_S_INPUT, &chan->channel); 593 err = drv(file, VIDIOC_S_INPUT, &chan->channel);
606 if (err < 0) 594 if (err < 0)
607 dprintk("VIDIOCSCHAN / VIDIOC_S_INPUT: %d\n", err); 595 dprintk("VIDIOCSCHAN / VIDIOC_S_INPUT: %d\n", err);
608 switch (chan->norm) { 596 switch (chan->norm) {
@@ -617,7 +605,7 @@ static noinline int v4l1_compat_set_input(
617 break; 605 break;
618 } 606 }
619 if (0 != sid) { 607 if (0 != sid) {
620 err = drv(inode, file, VIDIOC_S_STD, &sid); 608 err = drv(file, VIDIOC_S_STD, &sid);
621 if (err < 0) 609 if (err < 0)
622 dprintk("VIDIOCSCHAN / VIDIOC_S_STD: %d\n", err); 610 dprintk("VIDIOCSCHAN / VIDIOC_S_STD: %d\n", err);
623 } 611 }
@@ -626,7 +614,6 @@ static noinline int v4l1_compat_set_input(
626 614
627static noinline int v4l1_compat_get_picture( 615static noinline int v4l1_compat_get_picture(
628 struct video_picture *pict, 616 struct video_picture *pict,
629 struct inode *inode,
630 struct file *file, 617 struct file *file,
631 v4l2_kioctl drv) 618 v4l2_kioctl drv)
632{ 619{
@@ -639,19 +626,19 @@ static noinline int v4l1_compat_get_picture(
639 return err; 626 return err;
640 } 627 }
641 628
642 pict->brightness = get_v4l_control(inode, file, 629 pict->brightness = get_v4l_control(file,
643 V4L2_CID_BRIGHTNESS, drv); 630 V4L2_CID_BRIGHTNESS, drv);
644 pict->hue = get_v4l_control(inode, file, 631 pict->hue = get_v4l_control(file,
645 V4L2_CID_HUE, drv); 632 V4L2_CID_HUE, drv);
646 pict->contrast = get_v4l_control(inode, file, 633 pict->contrast = get_v4l_control(file,
647 V4L2_CID_CONTRAST, drv); 634 V4L2_CID_CONTRAST, drv);
648 pict->colour = get_v4l_control(inode, file, 635 pict->colour = get_v4l_control(file,
649 V4L2_CID_SATURATION, drv); 636 V4L2_CID_SATURATION, drv);
650 pict->whiteness = get_v4l_control(inode, file, 637 pict->whiteness = get_v4l_control(file,
651 V4L2_CID_WHITENESS, drv); 638 V4L2_CID_WHITENESS, drv);
652 639
653 fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; 640 fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
654 err = drv(inode, file, VIDIOC_G_FMT, fmt); 641 err = drv(file, VIDIOC_G_FMT, fmt);
655 if (err < 0) { 642 if (err < 0) {
656 dprintk("VIDIOCGPICT / VIDIOC_G_FMT: %d\n", err); 643 dprintk("VIDIOCGPICT / VIDIOC_G_FMT: %d\n", err);
657 goto done; 644 goto done;
@@ -669,7 +656,6 @@ done:
669 656
670static noinline int v4l1_compat_set_picture( 657static noinline int v4l1_compat_set_picture(
671 struct video_picture *pict, 658 struct video_picture *pict,
672 struct inode *inode,
673 struct file *file, 659 struct file *file,
674 v4l2_kioctl drv) 660 v4l2_kioctl drv)
675{ 661{
@@ -685,15 +671,15 @@ static noinline int v4l1_compat_set_picture(
685 } 671 }
686 memset(&fbuf, 0, sizeof(fbuf)); 672 memset(&fbuf, 0, sizeof(fbuf));
687 673
688 set_v4l_control(inode, file, 674 set_v4l_control(file,
689 V4L2_CID_BRIGHTNESS, pict->brightness, drv); 675 V4L2_CID_BRIGHTNESS, pict->brightness, drv);
690 set_v4l_control(inode, file, 676 set_v4l_control(file,
691 V4L2_CID_HUE, pict->hue, drv); 677 V4L2_CID_HUE, pict->hue, drv);
692 set_v4l_control(inode, file, 678 set_v4l_control(file,
693 V4L2_CID_CONTRAST, pict->contrast, drv); 679 V4L2_CID_CONTRAST, pict->contrast, drv);
694 set_v4l_control(inode, file, 680 set_v4l_control(file,
695 V4L2_CID_SATURATION, pict->colour, drv); 681 V4L2_CID_SATURATION, pict->colour, drv);
696 set_v4l_control(inode, file, 682 set_v4l_control(file,
697 V4L2_CID_WHITENESS, pict->whiteness, drv); 683 V4L2_CID_WHITENESS, pict->whiteness, drv);
698 /* 684 /*
699 * V4L1 uses this ioctl to set both memory capture and overlay 685 * V4L1 uses this ioctl to set both memory capture and overlay
@@ -703,7 +689,7 @@ static noinline int v4l1_compat_set_picture(
703 */ 689 */
704 690
705 fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; 691 fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
706 err = drv(inode, file, VIDIOC_G_FMT, fmt); 692 err = drv(file, VIDIOC_G_FMT, fmt);
707 /* If VIDIOC_G_FMT failed, then the driver likely doesn't 693 /* If VIDIOC_G_FMT failed, then the driver likely doesn't
708 support memory capture. Trying to set the memory capture 694 support memory capture. Trying to set the memory capture
709 parameters would be pointless. */ 695 parameters would be pointless. */
@@ -714,13 +700,13 @@ static noinline int v4l1_compat_set_picture(
714 palette_to_pixelformat(pict->palette)) { 700 palette_to_pixelformat(pict->palette)) {
715 fmt->fmt.pix.pixelformat = palette_to_pixelformat( 701 fmt->fmt.pix.pixelformat = palette_to_pixelformat(
716 pict->palette); 702 pict->palette);
717 mem_err = drv(inode, file, VIDIOC_S_FMT, fmt); 703 mem_err = drv(file, VIDIOC_S_FMT, fmt);
718 if (mem_err < 0) 704 if (mem_err < 0)
719 dprintk("VIDIOCSPICT / VIDIOC_S_FMT: %d\n", 705 dprintk("VIDIOCSPICT / VIDIOC_S_FMT: %d\n",
720 mem_err); 706 mem_err);
721 } 707 }
722 708
723 err = drv(inode, file, VIDIOC_G_FBUF, &fbuf); 709 err = drv(file, VIDIOC_G_FBUF, &fbuf);
724 /* If VIDIOC_G_FBUF failed, then the driver likely doesn't 710 /* If VIDIOC_G_FBUF failed, then the driver likely doesn't
725 support overlay. Trying to set the overlay parameters 711 support overlay. Trying to set the overlay parameters
726 would be quite pointless. */ 712 would be quite pointless. */
@@ -731,7 +717,7 @@ static noinline int v4l1_compat_set_picture(
731 palette_to_pixelformat(pict->palette)) { 717 palette_to_pixelformat(pict->palette)) {
732 fbuf.fmt.pixelformat = palette_to_pixelformat( 718 fbuf.fmt.pixelformat = palette_to_pixelformat(
733 pict->palette); 719 pict->palette);
734 ovl_err = drv(inode, file, VIDIOC_S_FBUF, &fbuf); 720 ovl_err = drv(file, VIDIOC_S_FBUF, &fbuf);
735 if (ovl_err < 0) 721 if (ovl_err < 0)
736 dprintk("VIDIOCSPICT / VIDIOC_S_FBUF: %d\n", 722 dprintk("VIDIOCSPICT / VIDIOC_S_FBUF: %d\n",
737 ovl_err); 723 ovl_err);
@@ -752,7 +738,6 @@ static noinline int v4l1_compat_set_picture(
752 738
753static noinline int v4l1_compat_get_tuner( 739static noinline int v4l1_compat_get_tuner(
754 struct video_tuner *tun, 740 struct video_tuner *tun,
755 struct inode *inode,
756 struct file *file, 741 struct file *file,
757 v4l2_kioctl drv) 742 v4l2_kioctl drv)
758{ 743{
@@ -762,7 +747,7 @@ static noinline int v4l1_compat_get_tuner(
762 v4l2_std_id sid; 747 v4l2_std_id sid;
763 748
764 memset(&tun2, 0, sizeof(tun2)); 749 memset(&tun2, 0, sizeof(tun2));
765 err = drv(inode, file, VIDIOC_G_TUNER, &tun2); 750 err = drv(file, VIDIOC_G_TUNER, &tun2);
766 if (err < 0) { 751 if (err < 0) {
767 dprintk("VIDIOCGTUNER / VIDIOC_G_TUNER: %d\n", err); 752 dprintk("VIDIOCGTUNER / VIDIOC_G_TUNER: %d\n", err);
768 goto done; 753 goto done;
@@ -778,7 +763,7 @@ static noinline int v4l1_compat_get_tuner(
778 for (i = 0; i < 64; i++) { 763 for (i = 0; i < 64; i++) {
779 memset(&std2, 0, sizeof(std2)); 764 memset(&std2, 0, sizeof(std2));
780 std2.index = i; 765 std2.index = i;
781 if (0 != drv(inode, file, VIDIOC_ENUMSTD, &std2)) 766 if (0 != drv(file, VIDIOC_ENUMSTD, &std2))
782 break; 767 break;
783 if (std2.id & V4L2_STD_PAL) 768 if (std2.id & V4L2_STD_PAL)
784 tun->flags |= VIDEO_TUNER_PAL; 769 tun->flags |= VIDEO_TUNER_PAL;
@@ -788,7 +773,7 @@ static noinline int v4l1_compat_get_tuner(
788 tun->flags |= VIDEO_TUNER_SECAM; 773 tun->flags |= VIDEO_TUNER_SECAM;
789 } 774 }
790 775
791 err = drv(inode, file, VIDIOC_G_STD, &sid); 776 err = drv(file, VIDIOC_G_STD, &sid);
792 if (err < 0) 777 if (err < 0)
793 dprintk("VIDIOCGTUNER / VIDIOC_G_STD: %d\n", err); 778 dprintk("VIDIOCGTUNER / VIDIOC_G_STD: %d\n", err);
794 if (err == 0) { 779 if (err == 0) {
@@ -811,7 +796,6 @@ done:
811 796
812static noinline int v4l1_compat_select_tuner( 797static noinline int v4l1_compat_select_tuner(
813 struct video_tuner *tun, 798 struct video_tuner *tun,
814 struct inode *inode,
815 struct file *file, 799 struct file *file,
816 v4l2_kioctl drv) 800 v4l2_kioctl drv)
817{ 801{
@@ -821,7 +805,7 @@ static noinline int v4l1_compat_select_tuner(
821 805
822 t.index = tun->tuner; 806 t.index = tun->tuner;
823 807
824 err = drv(inode, file, VIDIOC_S_INPUT, &t); 808 err = drv(file, VIDIOC_S_INPUT, &t);
825 if (err < 0) 809 if (err < 0)
826 dprintk("VIDIOCSTUNER / VIDIOC_S_INPUT: %d\n", err); 810 dprintk("VIDIOCSTUNER / VIDIOC_S_INPUT: %d\n", err);
827 return err; 811 return err;
@@ -829,7 +813,6 @@ static noinline int v4l1_compat_select_tuner(
829 813
830static noinline int v4l1_compat_get_frequency( 814static noinline int v4l1_compat_get_frequency(
831 unsigned long *freq, 815 unsigned long *freq,
832 struct inode *inode,
833 struct file *file, 816 struct file *file,
834 v4l2_kioctl drv) 817 v4l2_kioctl drv)
835{ 818{
@@ -838,7 +821,7 @@ static noinline int v4l1_compat_get_frequency(
838 memset(&freq2, 0, sizeof(freq2)); 821 memset(&freq2, 0, sizeof(freq2));
839 822
840 freq2.tuner = 0; 823 freq2.tuner = 0;
841 err = drv(inode, file, VIDIOC_G_FREQUENCY, &freq2); 824 err = drv(file, VIDIOC_G_FREQUENCY, &freq2);
842 if (err < 0) 825 if (err < 0)
843 dprintk("VIDIOCGFREQ / VIDIOC_G_FREQUENCY: %d\n", err); 826 dprintk("VIDIOCGFREQ / VIDIOC_G_FREQUENCY: %d\n", err);
844 if (0 == err) 827 if (0 == err)
@@ -848,7 +831,6 @@ static noinline int v4l1_compat_get_frequency(
848 831
849static noinline int v4l1_compat_set_frequency( 832static noinline int v4l1_compat_set_frequency(
850 unsigned long *freq, 833 unsigned long *freq,
851 struct inode *inode,
852 struct file *file, 834 struct file *file,
853 v4l2_kioctl drv) 835 v4l2_kioctl drv)
854{ 836{
@@ -856,9 +838,9 @@ static noinline int v4l1_compat_set_frequency(
856 struct v4l2_frequency freq2; 838 struct v4l2_frequency freq2;
857 memset(&freq2, 0, sizeof(freq2)); 839 memset(&freq2, 0, sizeof(freq2));
858 840
859 drv(inode, file, VIDIOC_G_FREQUENCY, &freq2); 841 drv(file, VIDIOC_G_FREQUENCY, &freq2);
860 freq2.frequency = *freq; 842 freq2.frequency = *freq;
861 err = drv(inode, file, VIDIOC_S_FREQUENCY, &freq2); 843 err = drv(file, VIDIOC_S_FREQUENCY, &freq2);
862 if (err < 0) 844 if (err < 0)
863 dprintk("VIDIOCSFREQ / VIDIOC_S_FREQUENCY: %d\n", err); 845 dprintk("VIDIOCSFREQ / VIDIOC_S_FREQUENCY: %d\n", err);
864 return err; 846 return err;
@@ -866,7 +848,6 @@ static noinline int v4l1_compat_set_frequency(
866 848
867static noinline int v4l1_compat_get_audio( 849static noinline int v4l1_compat_get_audio(
868 struct video_audio *aud, 850 struct video_audio *aud,
869 struct inode *inode,
870 struct file *file, 851 struct file *file,
871 v4l2_kioctl drv) 852 v4l2_kioctl drv)
872{ 853{
@@ -876,7 +857,7 @@ static noinline int v4l1_compat_get_audio(
876 struct v4l2_tuner tun2; 857 struct v4l2_tuner tun2;
877 memset(&aud2, 0, sizeof(aud2)); 858 memset(&aud2, 0, sizeof(aud2));
878 859
879 err = drv(inode, file, VIDIOC_G_AUDIO, &aud2); 860 err = drv(file, VIDIOC_G_AUDIO, &aud2);
880 if (err < 0) { 861 if (err < 0) {
881 dprintk("VIDIOCGAUDIO / VIDIOC_G_AUDIO: %d\n", err); 862 dprintk("VIDIOCGAUDIO / VIDIOC_G_AUDIO: %d\n", err);
882 goto done; 863 goto done;
@@ -886,27 +867,27 @@ static noinline int v4l1_compat_get_audio(
886 aud->name[sizeof(aud->name) - 1] = 0; 867 aud->name[sizeof(aud->name) - 1] = 0;
887 aud->audio = aud2.index; 868 aud->audio = aud2.index;
888 aud->flags = 0; 869 aud->flags = 0;
889 i = get_v4l_control(inode, file, V4L2_CID_AUDIO_VOLUME, drv); 870 i = get_v4l_control(file, V4L2_CID_AUDIO_VOLUME, drv);
890 if (i >= 0) { 871 if (i >= 0) {
891 aud->volume = i; 872 aud->volume = i;
892 aud->flags |= VIDEO_AUDIO_VOLUME; 873 aud->flags |= VIDEO_AUDIO_VOLUME;
893 } 874 }
894 i = get_v4l_control(inode, file, V4L2_CID_AUDIO_BASS, drv); 875 i = get_v4l_control(file, V4L2_CID_AUDIO_BASS, drv);
895 if (i >= 0) { 876 if (i >= 0) {
896 aud->bass = i; 877 aud->bass = i;
897 aud->flags |= VIDEO_AUDIO_BASS; 878 aud->flags |= VIDEO_AUDIO_BASS;
898 } 879 }
899 i = get_v4l_control(inode, file, V4L2_CID_AUDIO_TREBLE, drv); 880 i = get_v4l_control(file, V4L2_CID_AUDIO_TREBLE, drv);
900 if (i >= 0) { 881 if (i >= 0) {
901 aud->treble = i; 882 aud->treble = i;
902 aud->flags |= VIDEO_AUDIO_TREBLE; 883 aud->flags |= VIDEO_AUDIO_TREBLE;
903 } 884 }
904 i = get_v4l_control(inode, file, V4L2_CID_AUDIO_BALANCE, drv); 885 i = get_v4l_control(file, V4L2_CID_AUDIO_BALANCE, drv);
905 if (i >= 0) { 886 if (i >= 0) {
906 aud->balance = i; 887 aud->balance = i;
907 aud->flags |= VIDEO_AUDIO_BALANCE; 888 aud->flags |= VIDEO_AUDIO_BALANCE;
908 } 889 }
909 i = get_v4l_control(inode, file, V4L2_CID_AUDIO_MUTE, drv); 890 i = get_v4l_control(file, V4L2_CID_AUDIO_MUTE, drv);
910 if (i >= 0) { 891 if (i >= 0) {
911 if (i) 892 if (i)
912 aud->flags |= VIDEO_AUDIO_MUTE; 893 aud->flags |= VIDEO_AUDIO_MUTE;
@@ -914,13 +895,13 @@ static noinline int v4l1_compat_get_audio(
914 } 895 }
915 aud->step = 1; 896 aud->step = 1;
916 qctrl2.id = V4L2_CID_AUDIO_VOLUME; 897 qctrl2.id = V4L2_CID_AUDIO_VOLUME;
917 if (drv(inode, file, VIDIOC_QUERYCTRL, &qctrl2) == 0 && 898 if (drv(file, VIDIOC_QUERYCTRL, &qctrl2) == 0 &&
918 !(qctrl2.flags & V4L2_CTRL_FLAG_DISABLED)) 899 !(qctrl2.flags & V4L2_CTRL_FLAG_DISABLED))
919 aud->step = qctrl2.step; 900 aud->step = qctrl2.step;
920 aud->mode = 0; 901 aud->mode = 0;
921 902
922 memset(&tun2, 0, sizeof(tun2)); 903 memset(&tun2, 0, sizeof(tun2));
923 err = drv(inode, file, VIDIOC_G_TUNER, &tun2); 904 err = drv(file, VIDIOC_G_TUNER, &tun2);
924 if (err < 0) { 905 if (err < 0) {
925 dprintk("VIDIOCGAUDIO / VIDIOC_G_TUNER: %d\n", err); 906 dprintk("VIDIOCGAUDIO / VIDIOC_G_TUNER: %d\n", err);
926 err = 0; 907 err = 0;
@@ -939,7 +920,6 @@ done:
939 920
940static noinline int v4l1_compat_set_audio( 921static noinline int v4l1_compat_set_audio(
941 struct video_audio *aud, 922 struct video_audio *aud,
942 struct inode *inode,
943 struct file *file, 923 struct file *file,
944 v4l2_kioctl drv) 924 v4l2_kioctl drv)
945{ 925{
@@ -951,24 +931,24 @@ static noinline int v4l1_compat_set_audio(
951 memset(&tun2, 0, sizeof(tun2)); 931 memset(&tun2, 0, sizeof(tun2));
952 932
953 aud2.index = aud->audio; 933 aud2.index = aud->audio;
954 err = drv(inode, file, VIDIOC_S_AUDIO, &aud2); 934 err = drv(file, VIDIOC_S_AUDIO, &aud2);
955 if (err < 0) { 935 if (err < 0) {
956 dprintk("VIDIOCSAUDIO / VIDIOC_S_AUDIO: %d\n", err); 936 dprintk("VIDIOCSAUDIO / VIDIOC_S_AUDIO: %d\n", err);
957 goto done; 937 goto done;
958 } 938 }
959 939
960 set_v4l_control(inode, file, V4L2_CID_AUDIO_VOLUME, 940 set_v4l_control(file, V4L2_CID_AUDIO_VOLUME,
961 aud->volume, drv); 941 aud->volume, drv);
962 set_v4l_control(inode, file, V4L2_CID_AUDIO_BASS, 942 set_v4l_control(file, V4L2_CID_AUDIO_BASS,
963 aud->bass, drv); 943 aud->bass, drv);
964 set_v4l_control(inode, file, V4L2_CID_AUDIO_TREBLE, 944 set_v4l_control(file, V4L2_CID_AUDIO_TREBLE,
965 aud->treble, drv); 945 aud->treble, drv);
966 set_v4l_control(inode, file, V4L2_CID_AUDIO_BALANCE, 946 set_v4l_control(file, V4L2_CID_AUDIO_BALANCE,
967 aud->balance, drv); 947 aud->balance, drv);
968 set_v4l_control(inode, file, V4L2_CID_AUDIO_MUTE, 948 set_v4l_control(file, V4L2_CID_AUDIO_MUTE,
969 !!(aud->flags & VIDEO_AUDIO_MUTE), drv); 949 !!(aud->flags & VIDEO_AUDIO_MUTE), drv);
970 950
971 err = drv(inode, file, VIDIOC_G_TUNER, &tun2); 951 err = drv(file, VIDIOC_G_TUNER, &tun2);
972 if (err < 0) 952 if (err < 0)
973 dprintk("VIDIOCSAUDIO / VIDIOC_G_TUNER: %d\n", err); 953 dprintk("VIDIOCSAUDIO / VIDIOC_G_TUNER: %d\n", err);
974 if (err == 0) { 954 if (err == 0) {
@@ -985,7 +965,7 @@ static noinline int v4l1_compat_set_audio(
985 tun2.audmode = V4L2_TUNER_MODE_LANG2; 965 tun2.audmode = V4L2_TUNER_MODE_LANG2;
986 break; 966 break;
987 } 967 }
988 err = drv(inode, file, VIDIOC_S_TUNER, &tun2); 968 err = drv(file, VIDIOC_S_TUNER, &tun2);
989 if (err < 0) 969 if (err < 0)
990 dprintk("VIDIOCSAUDIO / VIDIOC_S_TUNER: %d\n", err); 970 dprintk("VIDIOCSAUDIO / VIDIOC_S_TUNER: %d\n", err);
991 } 971 }
@@ -996,7 +976,6 @@ done:
996 976
997static noinline int v4l1_compat_capture_frame( 977static noinline int v4l1_compat_capture_frame(
998 struct video_mmap *mm, 978 struct video_mmap *mm,
999 struct inode *inode,
1000 struct file *file, 979 struct file *file,
1001 v4l2_kioctl drv) 980 v4l2_kioctl drv)
1002{ 981{
@@ -1013,7 +992,7 @@ static noinline int v4l1_compat_capture_frame(
1013 memset(&buf, 0, sizeof(buf)); 992 memset(&buf, 0, sizeof(buf));
1014 993
1015 fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; 994 fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
1016 err = drv(inode, file, VIDIOC_G_FMT, fmt); 995 err = drv(file, VIDIOC_G_FMT, fmt);
1017 if (err < 0) { 996 if (err < 0) {
1018 dprintk("VIDIOCMCAPTURE / VIDIOC_G_FMT: %d\n", err); 997 dprintk("VIDIOCMCAPTURE / VIDIOC_G_FMT: %d\n", err);
1019 goto done; 998 goto done;
@@ -1029,7 +1008,7 @@ static noinline int v4l1_compat_capture_frame(
1029 palette_to_pixelformat(mm->format); 1008 palette_to_pixelformat(mm->format);
1030 fmt->fmt.pix.field = V4L2_FIELD_ANY; 1009 fmt->fmt.pix.field = V4L2_FIELD_ANY;
1031 fmt->fmt.pix.bytesperline = 0; 1010 fmt->fmt.pix.bytesperline = 0;
1032 err = drv(inode, file, VIDIOC_S_FMT, fmt); 1011 err = drv(file, VIDIOC_S_FMT, fmt);
1033 if (err < 0) { 1012 if (err < 0) {
1034 dprintk("VIDIOCMCAPTURE / VIDIOC_S_FMT: %d\n", err); 1013 dprintk("VIDIOCMCAPTURE / VIDIOC_S_FMT: %d\n", err);
1035 goto done; 1014 goto done;
@@ -1037,17 +1016,17 @@ static noinline int v4l1_compat_capture_frame(
1037 } 1016 }
1038 buf.index = mm->frame; 1017 buf.index = mm->frame;
1039 buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; 1018 buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
1040 err = drv(inode, file, VIDIOC_QUERYBUF, &buf); 1019 err = drv(file, VIDIOC_QUERYBUF, &buf);
1041 if (err < 0) { 1020 if (err < 0) {
1042 dprintk("VIDIOCMCAPTURE / VIDIOC_QUERYBUF: %d\n", err); 1021 dprintk("VIDIOCMCAPTURE / VIDIOC_QUERYBUF: %d\n", err);
1043 goto done; 1022 goto done;
1044 } 1023 }
1045 err = drv(inode, file, VIDIOC_QBUF, &buf); 1024 err = drv(file, VIDIOC_QBUF, &buf);
1046 if (err < 0) { 1025 if (err < 0) {
1047 dprintk("VIDIOCMCAPTURE / VIDIOC_QBUF: %d\n", err); 1026 dprintk("VIDIOCMCAPTURE / VIDIOC_QBUF: %d\n", err);
1048 goto done; 1027 goto done;
1049 } 1028 }
1050 err = drv(inode, file, VIDIOC_STREAMON, &captype); 1029 err = drv(file, VIDIOC_STREAMON, &captype);
1051 if (err < 0) 1030 if (err < 0)
1052 dprintk("VIDIOCMCAPTURE / VIDIOC_STREAMON: %d\n", err); 1031 dprintk("VIDIOCMCAPTURE / VIDIOC_STREAMON: %d\n", err);
1053done: 1032done:
@@ -1057,7 +1036,6 @@ done:
1057 1036
1058static noinline int v4l1_compat_sync( 1037static noinline int v4l1_compat_sync(
1059 int *i, 1038 int *i,
1060 struct inode *inode,
1061 struct file *file, 1039 struct file *file,
1062 v4l2_kioctl drv) 1040 v4l2_kioctl drv)
1063{ 1041{
@@ -1069,7 +1047,7 @@ static noinline int v4l1_compat_sync(
1069 memset(&buf, 0, sizeof(buf)); 1047 memset(&buf, 0, sizeof(buf));
1070 buf.index = *i; 1048 buf.index = *i;
1071 buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; 1049 buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
1072 err = drv(inode, file, VIDIOC_QUERYBUF, &buf); 1050 err = drv(file, VIDIOC_QUERYBUF, &buf);
1073 if (err < 0) { 1051 if (err < 0) {
1074 /* No such buffer */ 1052 /* No such buffer */
1075 dprintk("VIDIOCSYNC / VIDIOC_QUERYBUF: %d\n", err); 1053 dprintk("VIDIOCSYNC / VIDIOC_QUERYBUF: %d\n", err);
@@ -1082,7 +1060,7 @@ static noinline int v4l1_compat_sync(
1082 } 1060 }
1083 1061
1084 /* make sure capture actually runs so we don't block forever */ 1062 /* make sure capture actually runs so we don't block forever */
1085 err = drv(inode, file, VIDIOC_STREAMON, &captype); 1063 err = drv(file, VIDIOC_STREAMON, &captype);
1086 if (err < 0) { 1064 if (err < 0) {
1087 dprintk("VIDIOCSYNC / VIDIOC_STREAMON: %d\n", err); 1065 dprintk("VIDIOCSYNC / VIDIOC_STREAMON: %d\n", err);
1088 goto done; 1066 goto done;
@@ -1096,7 +1074,7 @@ static noinline int v4l1_compat_sync(
1096 if (err < 0 || /* error or sleep was interrupted */ 1074 if (err < 0 || /* error or sleep was interrupted */
1097 err == 0) /* timeout? Shouldn't occur. */ 1075 err == 0) /* timeout? Shouldn't occur. */
1098 break; 1076 break;
1099 err = drv(inode, file, VIDIOC_QUERYBUF, &buf); 1077 err = drv(file, VIDIOC_QUERYBUF, &buf);
1100 if (err < 0) 1078 if (err < 0)
1101 dprintk("VIDIOCSYNC / VIDIOC_QUERYBUF: %d\n", err); 1079 dprintk("VIDIOCSYNC / VIDIOC_QUERYBUF: %d\n", err);
1102 } 1080 }
@@ -1104,7 +1082,7 @@ static noinline int v4l1_compat_sync(
1104 if (!(buf.flags & V4L2_BUF_FLAG_DONE)) /* not done */ 1082 if (!(buf.flags & V4L2_BUF_FLAG_DONE)) /* not done */
1105 goto done; 1083 goto done;
1106 do { 1084 do {
1107 err = drv(inode, file, VIDIOC_DQBUF, &buf); 1085 err = drv(file, VIDIOC_DQBUF, &buf);
1108 if (err < 0) 1086 if (err < 0)
1109 dprintk("VIDIOCSYNC / VIDIOC_DQBUF: %d\n", err); 1087 dprintk("VIDIOCSYNC / VIDIOC_DQBUF: %d\n", err);
1110 } while (err == 0 && buf.index != *i); 1088 } while (err == 0 && buf.index != *i);
@@ -1114,7 +1092,6 @@ done:
1114 1092
1115static noinline int v4l1_compat_get_vbi_format( 1093static noinline int v4l1_compat_get_vbi_format(
1116 struct vbi_format *fmt, 1094 struct vbi_format *fmt,
1117 struct inode *inode,
1118 struct file *file, 1095 struct file *file,
1119 v4l2_kioctl drv) 1096 v4l2_kioctl drv)
1120{ 1097{
@@ -1128,7 +1105,7 @@ static noinline int v4l1_compat_get_vbi_format(
1128 } 1105 }
1129 fmt2->type = V4L2_BUF_TYPE_VBI_CAPTURE; 1106 fmt2->type = V4L2_BUF_TYPE_VBI_CAPTURE;
1130 1107
1131 err = drv(inode, file, VIDIOC_G_FMT, fmt2); 1108 err = drv(file, VIDIOC_G_FMT, fmt2);
1132 if (err < 0) { 1109 if (err < 0) {
1133 dprintk("VIDIOCGVBIFMT / VIDIOC_G_FMT: %d\n", err); 1110 dprintk("VIDIOCGVBIFMT / VIDIOC_G_FMT: %d\n", err);
1134 goto done; 1111 goto done;
@@ -1153,7 +1130,6 @@ done:
1153 1130
1154static noinline int v4l1_compat_set_vbi_format( 1131static noinline int v4l1_compat_set_vbi_format(
1155 struct vbi_format *fmt, 1132 struct vbi_format *fmt,
1156 struct inode *inode,
1157 struct file *file, 1133 struct file *file,
1158 v4l2_kioctl drv) 1134 v4l2_kioctl drv)
1159{ 1135{
@@ -1179,7 +1155,7 @@ static noinline int v4l1_compat_set_vbi_format(
1179 fmt2->fmt.vbi.start[1] = fmt->start[1]; 1155 fmt2->fmt.vbi.start[1] = fmt->start[1];
1180 fmt2->fmt.vbi.count[1] = fmt->count[1]; 1156 fmt2->fmt.vbi.count[1] = fmt->count[1];
1181 fmt2->fmt.vbi.flags = fmt->flags; 1157 fmt2->fmt.vbi.flags = fmt->flags;
1182 err = drv(inode, file, VIDIOC_TRY_FMT, fmt2); 1158 err = drv(file, VIDIOC_TRY_FMT, fmt2);
1183 if (err < 0) { 1159 if (err < 0) {
1184 dprintk("VIDIOCSVBIFMT / VIDIOC_TRY_FMT: %d\n", err); 1160 dprintk("VIDIOCSVBIFMT / VIDIOC_TRY_FMT: %d\n", err);
1185 goto done; 1161 goto done;
@@ -1196,7 +1172,7 @@ static noinline int v4l1_compat_set_vbi_format(
1196 err = -EINVAL; 1172 err = -EINVAL;
1197 goto done; 1173 goto done;
1198 } 1174 }
1199 err = drv(inode, file, VIDIOC_S_FMT, fmt2); 1175 err = drv(file, VIDIOC_S_FMT, fmt2);
1200 if (err < 0) 1176 if (err < 0)
1201 dprintk("VIDIOCSVBIFMT / VIDIOC_S_FMT: %d\n", err); 1177 dprintk("VIDIOCSVBIFMT / VIDIOC_S_FMT: %d\n", err);
1202done: 1178done:
@@ -1208,8 +1184,7 @@ done:
1208 * This function is exported. 1184 * This function is exported.
1209 */ 1185 */
1210int 1186int
1211v4l_compat_translate_ioctl(struct inode *inode, 1187v4l_compat_translate_ioctl(struct file *file,
1212 struct file *file,
1213 int cmd, 1188 int cmd,
1214 void *arg, 1189 void *arg,
1215 v4l2_kioctl drv) 1190 v4l2_kioctl drv)
@@ -1218,64 +1193,64 @@ v4l_compat_translate_ioctl(struct inode *inode,
1218 1193
1219 switch (cmd) { 1194 switch (cmd) {
1220 case VIDIOCGCAP: /* capability */ 1195 case VIDIOCGCAP: /* capability */
1221 err = v4l1_compat_get_capabilities(arg, inode, file, drv); 1196 err = v4l1_compat_get_capabilities(arg, file, drv);
1222 break; 1197 break;
1223 case VIDIOCGFBUF: /* get frame buffer */ 1198 case VIDIOCGFBUF: /* get frame buffer */
1224 err = v4l1_compat_get_frame_buffer(arg, inode, file, drv); 1199 err = v4l1_compat_get_frame_buffer(arg, file, drv);
1225 break; 1200 break;
1226 case VIDIOCSFBUF: /* set frame buffer */ 1201 case VIDIOCSFBUF: /* set frame buffer */
1227 err = v4l1_compat_set_frame_buffer(arg, inode, file, drv); 1202 err = v4l1_compat_set_frame_buffer(arg, file, drv);
1228 break; 1203 break;
1229 case VIDIOCGWIN: /* get window or capture dimensions */ 1204 case VIDIOCGWIN: /* get window or capture dimensions */
1230 err = v4l1_compat_get_win_cap_dimensions(arg, inode, file, drv); 1205 err = v4l1_compat_get_win_cap_dimensions(arg, file, drv);
1231 break; 1206 break;
1232 case VIDIOCSWIN: /* set window and/or capture dimensions */ 1207 case VIDIOCSWIN: /* set window and/or capture dimensions */
1233 err = v4l1_compat_set_win_cap_dimensions(arg, inode, file, drv); 1208 err = v4l1_compat_set_win_cap_dimensions(arg, file, drv);
1234 break; 1209 break;
1235 case VIDIOCCAPTURE: /* turn on/off preview */ 1210 case VIDIOCCAPTURE: /* turn on/off preview */
1236 err = v4l1_compat_turn_preview_on_off(arg, inode, file, drv); 1211 err = v4l1_compat_turn_preview_on_off(arg, file, drv);
1237 break; 1212 break;
1238 case VIDIOCGCHAN: /* get input information */ 1213 case VIDIOCGCHAN: /* get input information */
1239 err = v4l1_compat_get_input_info(arg, inode, file, drv); 1214 err = v4l1_compat_get_input_info(arg, file, drv);
1240 break; 1215 break;
1241 case VIDIOCSCHAN: /* set input */ 1216 case VIDIOCSCHAN: /* set input */
1242 err = v4l1_compat_set_input(arg, inode, file, drv); 1217 err = v4l1_compat_set_input(arg, file, drv);
1243 break; 1218 break;
1244 case VIDIOCGPICT: /* get tone controls & partial capture format */ 1219 case VIDIOCGPICT: /* get tone controls & partial capture format */
1245 err = v4l1_compat_get_picture(arg, inode, file, drv); 1220 err = v4l1_compat_get_picture(arg, file, drv);
1246 break; 1221 break;
1247 case VIDIOCSPICT: /* set tone controls & partial capture format */ 1222 case VIDIOCSPICT: /* set tone controls & partial capture format */
1248 err = v4l1_compat_set_picture(arg, inode, file, drv); 1223 err = v4l1_compat_set_picture(arg, file, drv);
1249 break; 1224 break;
1250 case VIDIOCGTUNER: /* get tuner information */ 1225 case VIDIOCGTUNER: /* get tuner information */
1251 err = v4l1_compat_get_tuner(arg, inode, file, drv); 1226 err = v4l1_compat_get_tuner(arg, file, drv);
1252 break; 1227 break;
1253 case VIDIOCSTUNER: /* select a tuner input */ 1228 case VIDIOCSTUNER: /* select a tuner input */
1254 err = v4l1_compat_select_tuner(arg, inode, file, drv); 1229 err = v4l1_compat_select_tuner(arg, file, drv);
1255 break; 1230 break;
1256 case VIDIOCGFREQ: /* get frequency */ 1231 case VIDIOCGFREQ: /* get frequency */
1257 err = v4l1_compat_get_frequency(arg, inode, file, drv); 1232 err = v4l1_compat_get_frequency(arg, file, drv);
1258 break; 1233 break;
1259 case VIDIOCSFREQ: /* set frequency */ 1234 case VIDIOCSFREQ: /* set frequency */
1260 err = v4l1_compat_set_frequency(arg, inode, file, drv); 1235 err = v4l1_compat_set_frequency(arg, file, drv);
1261 break; 1236 break;
1262 case VIDIOCGAUDIO: /* get audio properties/controls */ 1237 case VIDIOCGAUDIO: /* get audio properties/controls */
1263 err = v4l1_compat_get_audio(arg, inode, file, drv); 1238 err = v4l1_compat_get_audio(arg, file, drv);
1264 break; 1239 break;
1265 case VIDIOCSAUDIO: /* set audio controls */ 1240 case VIDIOCSAUDIO: /* set audio controls */
1266 err = v4l1_compat_set_audio(arg, inode, file, drv); 1241 err = v4l1_compat_set_audio(arg, file, drv);
1267 break; 1242 break;
1268 case VIDIOCMCAPTURE: /* capture a frame */ 1243 case VIDIOCMCAPTURE: /* capture a frame */
1269 err = v4l1_compat_capture_frame(arg, inode, file, drv); 1244 err = v4l1_compat_capture_frame(arg, file, drv);
1270 break; 1245 break;
1271 case VIDIOCSYNC: /* wait for a frame */ 1246 case VIDIOCSYNC: /* wait for a frame */
1272 err = v4l1_compat_sync(arg, inode, file, drv); 1247 err = v4l1_compat_sync(arg, file, drv);
1273 break; 1248 break;
1274 case VIDIOCGVBIFMT: /* query VBI data capture format */ 1249 case VIDIOCGVBIFMT: /* query VBI data capture format */
1275 err = v4l1_compat_get_vbi_format(arg, inode, file, drv); 1250 err = v4l1_compat_get_vbi_format(arg, file, drv);
1276 break; 1251 break;
1277 case VIDIOCSVBIFMT: 1252 case VIDIOCSVBIFMT:
1278 err = v4l1_compat_set_vbi_format(arg, inode, file, drv); 1253 err = v4l1_compat_set_vbi_format(arg, file, drv);
1279 break; 1254 break;
1280 default: 1255 default:
1281 err = -ENOIOCTLCMD; 1256 err = -ENOIOCTLCMD;
diff --git a/drivers/media/video/v4l2-int-device.c b/drivers/media/video/v4l2-int-device.c
index 0e4549922f26..a935bae538ef 100644
--- a/drivers/media/video/v4l2-int-device.c
+++ b/drivers/media/video/v4l2-int-device.c
@@ -32,7 +32,7 @@
32static DEFINE_MUTEX(mutex); 32static DEFINE_MUTEX(mutex);
33static LIST_HEAD(int_list); 33static LIST_HEAD(int_list);
34 34
35static void v4l2_int_device_try_attach_all(void) 35void v4l2_int_device_try_attach_all(void)
36{ 36{
37 struct v4l2_int_device *m, *s; 37 struct v4l2_int_device *m, *s;
38 38
@@ -66,6 +66,7 @@ static void v4l2_int_device_try_attach_all(void)
66 } 66 }
67 } 67 }
68} 68}
69EXPORT_SYMBOL_GPL(v4l2_int_device_try_attach_all);
69 70
70static int ioctl_sort_cmp(const void *a, const void *b) 71static int ioctl_sort_cmp(const void *a, const void *b)
71{ 72{
@@ -144,6 +145,7 @@ int v4l2_int_ioctl_0(struct v4l2_int_device *d, int cmd)
144 find_ioctl(d->u.slave, cmd, 145 find_ioctl(d->u.slave, cmd,
145 (v4l2_int_ioctl_func *)no_such_ioctl_0))(d); 146 (v4l2_int_ioctl_func *)no_such_ioctl_0))(d);
146} 147}
148EXPORT_SYMBOL_GPL(v4l2_int_ioctl_0);
147 149
148static int no_such_ioctl_1(struct v4l2_int_device *d, void *arg) 150static int no_such_ioctl_1(struct v4l2_int_device *d, void *arg)
149{ 151{
@@ -156,5 +158,6 @@ int v4l2_int_ioctl_1(struct v4l2_int_device *d, int cmd, void *arg)
156 find_ioctl(d->u.slave, cmd, 158 find_ioctl(d->u.slave, cmd,
157 (v4l2_int_ioctl_func *)no_such_ioctl_1))(d, arg); 159 (v4l2_int_ioctl_func *)no_such_ioctl_1))(d, arg);
158} 160}
161EXPORT_SYMBOL_GPL(v4l2_int_ioctl_1);
159 162
160MODULE_LICENSE("GPL"); 163MODULE_LICENSE("GPL");
diff --git a/drivers/media/video/v4l2-ioctl.c b/drivers/media/video/v4l2-ioctl.c
index 155c9d77a463..710e1a40c422 100644
--- a/drivers/media/video/v4l2-ioctl.c
+++ b/drivers/media/video/v4l2-ioctl.c
@@ -625,13 +625,13 @@ static int check_fmt(const struct v4l2_ioctl_ops *ops, enum v4l2_buf_type type)
625 return -EINVAL; 625 return -EINVAL;
626} 626}
627 627
628static int __video_do_ioctl(struct inode *inode, struct file *file, 628static int __video_do_ioctl(struct file *file,
629 unsigned int cmd, void *arg) 629 unsigned int cmd, void *arg)
630{ 630{
631 struct video_device *vfd = video_devdata(file); 631 struct video_device *vfd = video_devdata(file);
632 const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops; 632 const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
633 void *fh = file->private_data; 633 void *fh = file->private_data;
634 int ret = -EINVAL; 634 int ret = -EINVAL;
635 635
636 if ((vfd->debug & V4L2_DEBUG_IOCTL) && 636 if ((vfd->debug & V4L2_DEBUG_IOCTL) &&
637 !(vfd->debug & V4L2_DEBUG_IOCTL_ARG)) { 637 !(vfd->debug & V4L2_DEBUG_IOCTL_ARG)) {
@@ -675,7 +675,7 @@ static int __video_do_ioctl(struct inode *inode, struct file *file,
675 V4L2 ioctls. 675 V4L2 ioctls.
676 ********************************************************/ 676 ********************************************************/
677 if (_IOC_TYPE(cmd) == 'v' && _IOC_NR(cmd) < BASE_VIDIOCPRIVATE) 677 if (_IOC_TYPE(cmd) == 'v' && _IOC_NR(cmd) < BASE_VIDIOCPRIVATE)
678 return v4l_compat_translate_ioctl(inode, file, cmd, arg, 678 return v4l_compat_translate_ioctl(file, cmd, arg,
679 __video_do_ioctl); 679 __video_do_ioctl);
680#endif 680#endif
681 681
@@ -1768,7 +1768,7 @@ static int __video_do_ioctl(struct inode *inode, struct file *file,
1768 return ret; 1768 return ret;
1769} 1769}
1770 1770
1771int video_ioctl2(struct inode *inode, struct file *file, 1771int __video_ioctl2(struct file *file,
1772 unsigned int cmd, unsigned long arg) 1772 unsigned int cmd, unsigned long arg)
1773{ 1773{
1774 char sbuf[128]; 1774 char sbuf[128];
@@ -1832,7 +1832,7 @@ int video_ioctl2(struct inode *inode, struct file *file,
1832 } 1832 }
1833 1833
1834 /* Handles IOCTL */ 1834 /* Handles IOCTL */
1835 err = __video_do_ioctl(inode, file, cmd, parg); 1835 err = __video_do_ioctl(file, cmd, parg);
1836 if (err == -ENOIOCTLCMD) 1836 if (err == -ENOIOCTLCMD)
1837 err = -EINVAL; 1837 err = -EINVAL;
1838 if (is_ext_ctrl) { 1838 if (is_ext_ctrl) {
@@ -1860,4 +1860,11 @@ out:
1860 kfree(mbuf); 1860 kfree(mbuf);
1861 return err; 1861 return err;
1862} 1862}
1863EXPORT_SYMBOL(__video_ioctl2);
1864
1865int video_ioctl2(struct inode *inode, struct file *file,
1866 unsigned int cmd, unsigned long arg)
1867{
1868 return __video_ioctl2(file, cmd, arg);
1869}
1863EXPORT_SYMBOL(video_ioctl2); 1870EXPORT_SYMBOL(video_ioctl2);
diff --git a/drivers/media/video/videobuf-dvb.c b/drivers/media/video/videobuf-dvb.c
index 917277d36605..0e7dcba8e4ae 100644
--- a/drivers/media/video/videobuf-dvb.c
+++ b/drivers/media/video/videobuf-dvb.c
@@ -296,29 +296,7 @@ EXPORT_SYMBOL(videobuf_dvb_register_bus);
296 296
297void videobuf_dvb_unregister_bus(struct videobuf_dvb_frontends *f) 297void videobuf_dvb_unregister_bus(struct videobuf_dvb_frontends *f)
298{ 298{
299 struct list_head *list, *q; 299 videobuf_dvb_dealloc_frontends(f);
300 struct videobuf_dvb_frontend *fe;
301
302 mutex_lock(&f->lock);
303 list_for_each_safe(list, q, &f->felist) {
304 fe = list_entry(list, struct videobuf_dvb_frontend, felist);
305 if (fe->dvb.net.dvbdev) {
306 dvb_net_release(&fe->dvb.net);
307 fe->dvb.demux.dmx.remove_frontend(&fe->dvb.demux.dmx,
308 &fe->dvb.fe_mem);
309 fe->dvb.demux.dmx.remove_frontend(&fe->dvb.demux.dmx,
310 &fe->dvb.fe_hw);
311 dvb_dmxdev_release(&fe->dvb.dmxdev);
312 dvb_dmx_release(&fe->dvb.demux);
313 dvb_unregister_frontend(fe->dvb.frontend);
314 }
315 if (fe->dvb.frontend)
316 /* always allocated, may have been reset */
317 dvb_frontend_detach(fe->dvb.frontend);
318 list_del(list);
319 kfree(fe);
320 }
321 mutex_unlock(&f->lock);
322 300
323 dvb_unregister_adapter(&f->adapter); 301 dvb_unregister_adapter(&f->adapter);
324} 302}
@@ -389,3 +367,31 @@ fail_alloc:
389 return fe; 367 return fe;
390} 368}
391EXPORT_SYMBOL(videobuf_dvb_alloc_frontend); 369EXPORT_SYMBOL(videobuf_dvb_alloc_frontend);
370
371void videobuf_dvb_dealloc_frontends(struct videobuf_dvb_frontends *f)
372{
373 struct list_head *list, *q;
374 struct videobuf_dvb_frontend *fe;
375
376 mutex_lock(&f->lock);
377 list_for_each_safe(list, q, &f->felist) {
378 fe = list_entry(list, struct videobuf_dvb_frontend, felist);
379 if (fe->dvb.net.dvbdev) {
380 dvb_net_release(&fe->dvb.net);
381 fe->dvb.demux.dmx.remove_frontend(&fe->dvb.demux.dmx,
382 &fe->dvb.fe_mem);
383 fe->dvb.demux.dmx.remove_frontend(&fe->dvb.demux.dmx,
384 &fe->dvb.fe_hw);
385 dvb_dmxdev_release(&fe->dvb.dmxdev);
386 dvb_dmx_release(&fe->dvb.demux);
387 dvb_unregister_frontend(fe->dvb.frontend);
388 }
389 if (fe->dvb.frontend)
390 /* always allocated, may have been reset */
391 dvb_frontend_detach(fe->dvb.frontend);
392 list_del(list); /* remove list entry */
393 kfree(fe); /* free frontend allocation */
394 }
395 mutex_unlock(&f->lock);
396}
397EXPORT_SYMBOL(videobuf_dvb_dealloc_frontends);
diff --git a/drivers/media/video/vivi.c b/drivers/media/video/vivi.c
index 7d7e51def461..e15e48f04be7 100644
--- a/drivers/media/video/vivi.c
+++ b/drivers/media/video/vivi.c
@@ -1163,11 +1163,11 @@ static int vivi_release(void)
1163 1163
1164 if (-1 != dev->vfd->minor) { 1164 if (-1 != dev->vfd->minor) {
1165 printk(KERN_INFO "%s: unregistering /dev/video%d\n", 1165 printk(KERN_INFO "%s: unregistering /dev/video%d\n",
1166 VIVI_MODULE_NAME, dev->vfd->minor); 1166 VIVI_MODULE_NAME, dev->vfd->num);
1167 video_unregister_device(dev->vfd); 1167 video_unregister_device(dev->vfd);
1168 } else { 1168 } else {
1169 printk(KERN_INFO "%s: releasing /dev/video%d\n", 1169 printk(KERN_INFO "%s: releasing /dev/video%d\n",
1170 VIVI_MODULE_NAME, dev->vfd->minor); 1170 VIVI_MODULE_NAME, dev->vfd->num);
1171 video_device_release(dev->vfd); 1171 video_device_release(dev->vfd);
1172 } 1172 }
1173 1173
@@ -1307,7 +1307,7 @@ static int __init vivi_init(void)
1307 1307
1308 dev->vfd = vfd; 1308 dev->vfd = vfd;
1309 printk(KERN_INFO "%s: V4L2 device registered as /dev/video%d\n", 1309 printk(KERN_INFO "%s: V4L2 device registered as /dev/video%d\n",
1310 VIVI_MODULE_NAME, vfd->minor); 1310 VIVI_MODULE_NAME, vfd->num);
1311 } 1311 }
1312 1312
1313 if (ret < 0) { 1313 if (ret < 0) {
diff --git a/drivers/media/video/w9968cf.c b/drivers/media/video/w9968cf.c
index dcd45dbd82dc..4dfb43bd1846 100644
--- a/drivers/media/video/w9968cf.c
+++ b/drivers/media/video/w9968cf.c
@@ -2398,7 +2398,7 @@ error:
2398 cam->sensor = CC_UNKNOWN; 2398 cam->sensor = CC_UNKNOWN;
2399 DBG(1, "Image sensor initialization failed for %s (/dev/video%d). " 2399 DBG(1, "Image sensor initialization failed for %s (/dev/video%d). "
2400 "Try to detach and attach this device again", 2400 "Try to detach and attach this device again",
2401 symbolic(camlist, cam->id), cam->v4ldev->minor) 2401 symbolic(camlist, cam->id), cam->v4ldev->num)
2402 return err; 2402 return err;
2403} 2403}
2404 2404
@@ -2644,7 +2644,7 @@ static void w9968cf_release_resources(struct w9968cf_device* cam)
2644{ 2644{
2645 mutex_lock(&w9968cf_devlist_mutex); 2645 mutex_lock(&w9968cf_devlist_mutex);
2646 2646
2647 DBG(2, "V4L device deregistered: /dev/video%d", cam->v4ldev->minor) 2647 DBG(2, "V4L device deregistered: /dev/video%d", cam->v4ldev->num)
2648 2648
2649 video_unregister_device(cam->v4ldev); 2649 video_unregister_device(cam->v4ldev);
2650 list_del(&cam->v4llist); 2650 list_del(&cam->v4llist);
@@ -2679,7 +2679,7 @@ static int w9968cf_open(struct inode* inode, struct file* filp)
2679 DBG(2, "No supported image sensor has been detected by the " 2679 DBG(2, "No supported image sensor has been detected by the "
2680 "'ovcamchip' module for the %s (/dev/video%d). Make " 2680 "'ovcamchip' module for the %s (/dev/video%d). Make "
2681 "sure it is loaded *before* (re)connecting the camera.", 2681 "sure it is loaded *before* (re)connecting the camera.",
2682 symbolic(camlist, cam->id), cam->v4ldev->minor) 2682 symbolic(camlist, cam->id), cam->v4ldev->num)
2683 mutex_unlock(&cam->dev_mutex); 2683 mutex_unlock(&cam->dev_mutex);
2684 up_read(&w9968cf_disconnect); 2684 up_read(&w9968cf_disconnect);
2685 return -ENODEV; 2685 return -ENODEV;
@@ -2687,7 +2687,7 @@ static int w9968cf_open(struct inode* inode, struct file* filp)
2687 2687
2688 if (cam->users) { 2688 if (cam->users) {
2689 DBG(2, "%s (/dev/video%d) has been already occupied by '%s'", 2689 DBG(2, "%s (/dev/video%d) has been already occupied by '%s'",
2690 symbolic(camlist, cam->id),cam->v4ldev->minor,cam->command) 2690 symbolic(camlist, cam->id), cam->v4ldev->num, cam->command)
2691 if ((filp->f_flags & O_NONBLOCK)||(filp->f_flags & O_NDELAY)) { 2691 if ((filp->f_flags & O_NONBLOCK)||(filp->f_flags & O_NDELAY)) {
2692 mutex_unlock(&cam->dev_mutex); 2692 mutex_unlock(&cam->dev_mutex);
2693 up_read(&w9968cf_disconnect); 2693 up_read(&w9968cf_disconnect);
@@ -2709,7 +2709,7 @@ static int w9968cf_open(struct inode* inode, struct file* filp)
2709 } 2709 }
2710 2710
2711 DBG(5, "Opening '%s', /dev/video%d ...", 2711 DBG(5, "Opening '%s', /dev/video%d ...",
2712 symbolic(camlist, cam->id), cam->v4ldev->minor) 2712 symbolic(camlist, cam->id), cam->v4ldev->num)
2713 2713
2714 cam->streaming = 0; 2714 cam->streaming = 0;
2715 cam->misconfigured = 0; 2715 cam->misconfigured = 0;
@@ -2947,7 +2947,7 @@ static int w9968cf_v4l_ioctl(struct inode* inode, struct file* filp,
2947 .minheight = cam->minheight, 2947 .minheight = cam->minheight,
2948 }; 2948 };
2949 sprintf(cap.name, "W996[87]CF USB Camera #%d", 2949 sprintf(cap.name, "W996[87]CF USB Camera #%d",
2950 cam->v4ldev->minor); 2950 cam->v4ldev->num);
2951 cap.maxwidth = (cam->upscaling && w9968cf_vpp) 2951 cap.maxwidth = (cam->upscaling && w9968cf_vpp)
2952 ? max((u16)W9968CF_MAX_WIDTH, cam->maxwidth) 2952 ? max((u16)W9968CF_MAX_WIDTH, cam->maxwidth)
2953 : cam->maxwidth; 2953 : cam->maxwidth;
@@ -3567,7 +3567,7 @@ w9968cf_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
3567 goto fail; 3567 goto fail;
3568 } 3568 }
3569 3569
3570 DBG(2, "V4L device registered as /dev/video%d", cam->v4ldev->minor) 3570 DBG(2, "V4L device registered as /dev/video%d", cam->v4ldev->num)
3571 3571
3572 /* Set some basic constants */ 3572 /* Set some basic constants */
3573 w9968cf_configure_camera(cam, udev, mod_id, dev_nr); 3573 w9968cf_configure_camera(cam, udev, mod_id, dev_nr);
@@ -3618,7 +3618,7 @@ static void w9968cf_usb_disconnect(struct usb_interface* intf)
3618 DBG(2, "The device is open (/dev/video%d)! " 3618 DBG(2, "The device is open (/dev/video%d)! "
3619 "Process name: %s. Deregistration and memory " 3619 "Process name: %s. Deregistration and memory "
3620 "deallocation are deferred on close.", 3620 "deallocation are deferred on close.",
3621 cam->v4ldev->minor, cam->command) 3621 cam->v4ldev->num, cam->command)
3622 cam->misconfigured = 1; 3622 cam->misconfigured = 1;
3623 w9968cf_stop_transfer(cam); 3623 w9968cf_stop_transfer(cam);
3624 wake_up_interruptible(&cam->wait_queue); 3624 wake_up_interruptible(&cam->wait_queue);
diff --git a/drivers/media/video/zc0301/zc0301_core.c b/drivers/media/video/zc0301/zc0301_core.c
index 6a0902bcba6b..9fc581707638 100644
--- a/drivers/media/video/zc0301/zc0301_core.c
+++ b/drivers/media/video/zc0301/zc0301_core.c
@@ -539,7 +539,7 @@ static int zc0301_stream_interrupt(struct zc0301_device* cam)
539 cam->state |= DEV_MISCONFIGURED; 539 cam->state |= DEV_MISCONFIGURED;
540 DBG(1, "URB timeout reached. The camera is misconfigured. To " 540 DBG(1, "URB timeout reached. The camera is misconfigured. To "
541 "use it, close and open /dev/video%d again.", 541 "use it, close and open /dev/video%d again.",
542 cam->v4ldev->minor); 542 cam->v4ldev->num);
543 return -EIO; 543 return -EIO;
544 } 544 }
545 545
@@ -640,7 +640,7 @@ static void zc0301_release_resources(struct kref *kref)
640{ 640{
641 struct zc0301_device *cam = container_of(kref, struct zc0301_device, 641 struct zc0301_device *cam = container_of(kref, struct zc0301_device,
642 kref); 642 kref);
643 DBG(2, "V4L2 device /dev/video%d deregistered", cam->v4ldev->minor); 643 DBG(2, "V4L2 device /dev/video%d deregistered", cam->v4ldev->num);
644 video_set_drvdata(cam->v4ldev, NULL); 644 video_set_drvdata(cam->v4ldev, NULL);
645 video_unregister_device(cam->v4ldev); 645 video_unregister_device(cam->v4ldev);
646 usb_put_dev(cam->usbdev); 646 usb_put_dev(cam->usbdev);
@@ -679,7 +679,7 @@ static int zc0301_open(struct inode* inode, struct file* filp)
679 } 679 }
680 680
681 if (cam->users) { 681 if (cam->users) {
682 DBG(2, "Device /dev/video%d is busy...", cam->v4ldev->minor); 682 DBG(2, "Device /dev/video%d is busy...", cam->v4ldev->num);
683 DBG(3, "Simultaneous opens are not supported"); 683 DBG(3, "Simultaneous opens are not supported");
684 if ((filp->f_flags & O_NONBLOCK) || 684 if ((filp->f_flags & O_NONBLOCK) ||
685 (filp->f_flags & O_NDELAY)) { 685 (filp->f_flags & O_NDELAY)) {
@@ -722,7 +722,7 @@ static int zc0301_open(struct inode* inode, struct file* filp)
722 cam->frame_count = 0; 722 cam->frame_count = 0;
723 zc0301_empty_framequeues(cam); 723 zc0301_empty_framequeues(cam);
724 724
725 DBG(3, "Video device /dev/video%d is open", cam->v4ldev->minor); 725 DBG(3, "Video device /dev/video%d is open", cam->v4ldev->num);
726 726
727out: 727out:
728 mutex_unlock(&cam->open_mutex); 728 mutex_unlock(&cam->open_mutex);
@@ -746,7 +746,7 @@ static int zc0301_release(struct inode* inode, struct file* filp)
746 cam->users--; 746 cam->users--;
747 wake_up_interruptible_nr(&cam->wait_open, 1); 747 wake_up_interruptible_nr(&cam->wait_open, 1);
748 748
749 DBG(3, "Video device /dev/video%d closed", cam->v4ldev->minor); 749 DBG(3, "Video device /dev/video%d closed", cam->v4ldev->num);
750 750
751 kref_put(&cam->kref, zc0301_release_resources); 751 kref_put(&cam->kref, zc0301_release_resources);
752 752
@@ -1275,7 +1275,7 @@ zc0301_vidioc_s_crop(struct zc0301_device* cam, void __user * arg)
1275 cam->state |= DEV_MISCONFIGURED; 1275 cam->state |= DEV_MISCONFIGURED;
1276 DBG(1, "VIDIOC_S_CROP failed because of hardware problems. To " 1276 DBG(1, "VIDIOC_S_CROP failed because of hardware problems. To "
1277 "use the camera, close and open /dev/video%d again.", 1277 "use the camera, close and open /dev/video%d again.",
1278 cam->v4ldev->minor); 1278 cam->v4ldev->num);
1279 return -EIO; 1279 return -EIO;
1280 } 1280 }
1281 1281
@@ -1288,7 +1288,7 @@ zc0301_vidioc_s_crop(struct zc0301_device* cam, void __user * arg)
1288 cam->state |= DEV_MISCONFIGURED; 1288 cam->state |= DEV_MISCONFIGURED;
1289 DBG(1, "VIDIOC_S_CROP failed because of not enough memory. To " 1289 DBG(1, "VIDIOC_S_CROP failed because of not enough memory. To "
1290 "use the camera, close and open /dev/video%d again.", 1290 "use the camera, close and open /dev/video%d again.",
1291 cam->v4ldev->minor); 1291 cam->v4ldev->num);
1292 return -ENOMEM; 1292 return -ENOMEM;
1293 } 1293 }
1294 1294
@@ -1470,7 +1470,7 @@ zc0301_vidioc_try_s_fmt(struct zc0301_device* cam, unsigned int cmd,
1470 cam->state |= DEV_MISCONFIGURED; 1470 cam->state |= DEV_MISCONFIGURED;
1471 DBG(1, "VIDIOC_S_FMT failed because of hardware problems. To " 1471 DBG(1, "VIDIOC_S_FMT failed because of hardware problems. To "
1472 "use the camera, close and open /dev/video%d again.", 1472 "use the camera, close and open /dev/video%d again.",
1473 cam->v4ldev->minor); 1473 cam->v4ldev->num);
1474 return -EIO; 1474 return -EIO;
1475 } 1475 }
1476 1476
@@ -1482,7 +1482,7 @@ zc0301_vidioc_try_s_fmt(struct zc0301_device* cam, unsigned int cmd,
1482 cam->state |= DEV_MISCONFIGURED; 1482 cam->state |= DEV_MISCONFIGURED;
1483 DBG(1, "VIDIOC_S_FMT failed because of not enough memory. To " 1483 DBG(1, "VIDIOC_S_FMT failed because of not enough memory. To "
1484 "use the camera, close and open /dev/video%d again.", 1484 "use the camera, close and open /dev/video%d again.",
1485 cam->v4ldev->minor); 1485 cam->v4ldev->num);
1486 return -ENOMEM; 1486 return -ENOMEM;
1487 } 1487 }
1488 1488
@@ -1529,7 +1529,7 @@ zc0301_vidioc_s_jpegcomp(struct zc0301_device* cam, void __user * arg)
1529 cam->state |= DEV_MISCONFIGURED; 1529 cam->state |= DEV_MISCONFIGURED;
1530 DBG(1, "VIDIOC_S_JPEGCOMP failed because of hardware " 1530 DBG(1, "VIDIOC_S_JPEGCOMP failed because of hardware "
1531 "problems. To use the camera, close and open " 1531 "problems. To use the camera, close and open "
1532 "/dev/video%d again.", cam->v4ldev->minor); 1532 "/dev/video%d again.", cam->v4ldev->num);
1533 return -EIO; 1533 return -EIO;
1534 } 1534 }
1535 1535
@@ -2005,7 +2005,7 @@ zc0301_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
2005 goto fail; 2005 goto fail;
2006 } 2006 }
2007 2007
2008 DBG(2, "V4L2 device registered as /dev/video%d", cam->v4ldev->minor); 2008 DBG(2, "V4L2 device registered as /dev/video%d", cam->v4ldev->num);
2009 2009
2010 cam->module_param.force_munmap = force_munmap[dev_nr]; 2010 cam->module_param.force_munmap = force_munmap[dev_nr];
2011 cam->module_param.frame_timeout = frame_timeout[dev_nr]; 2011 cam->module_param.frame_timeout = frame_timeout[dev_nr];
@@ -2044,7 +2044,7 @@ static void zc0301_usb_disconnect(struct usb_interface* intf)
2044 if (cam->users) { 2044 if (cam->users) {
2045 DBG(2, "Device /dev/video%d is open! Deregistration and " 2045 DBG(2, "Device /dev/video%d is open! Deregistration and "
2046 "memory deallocation are deferred.", 2046 "memory deallocation are deferred.",
2047 cam->v4ldev->minor); 2047 cam->v4ldev->num);
2048 cam->state |= DEV_MISCONFIGURED; 2048 cam->state |= DEV_MISCONFIGURED;
2049 zc0301_stop_transfer(cam); 2049 zc0301_stop_transfer(cam);
2050 cam->state |= DEV_DISCONNECTED; 2050 cam->state |= DEV_DISCONNECTED;
diff --git a/drivers/media/video/zr364xx.c b/drivers/media/video/zr364xx.c
index 7cdac99deea6..a1d81ed44c7c 100644
--- a/drivers/media/video/zr364xx.c
+++ b/drivers/media/video/zr364xx.c
@@ -885,7 +885,7 @@ static int zr364xx_probe(struct usb_interface *intf,
885 usb_set_intfdata(intf, cam); 885 usb_set_intfdata(intf, cam);
886 886
887 dev_info(&udev->dev, DRIVER_DESC " controlling video device %d\n", 887 dev_info(&udev->dev, DRIVER_DESC " controlling video device %d\n",
888 cam->vdev->minor); 888 cam->vdev->num);
889 return 0; 889 return 0;
890} 890}
891 891
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 68e237b830ad..0acefe8aff87 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -17,7 +17,7 @@ wm8350-objs := wm8350-core.o wm8350-regmap.o wm8350-gpio.o
17obj-$(CONFIG_MFD_WM8350) += wm8350.o 17obj-$(CONFIG_MFD_WM8350) += wm8350.o
18obj-$(CONFIG_MFD_WM8350_I2C) += wm8350-i2c.o 18obj-$(CONFIG_MFD_WM8350_I2C) += wm8350-i2c.o
19 19
20obj-$(CONFIG_TWL4030_CORE) += twl4030-core.o 20obj-$(CONFIG_TWL4030_CORE) += twl4030-core.o twl4030-irq.o
21 21
22obj-$(CONFIG_MFD_CORE) += mfd-core.o 22obj-$(CONFIG_MFD_CORE) += mfd-core.o
23 23
diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
index 220e4371266b..170f9d47c2f9 100644
--- a/drivers/mfd/sm501.c
+++ b/drivers/mfd/sm501.c
@@ -1374,31 +1374,31 @@ static int sm501_init_dev(struct sm501_devdata *sm)
1374static int sm501_plat_probe(struct platform_device *dev) 1374static int sm501_plat_probe(struct platform_device *dev)
1375{ 1375{
1376 struct sm501_devdata *sm; 1376 struct sm501_devdata *sm;
1377 int err; 1377 int ret;
1378 1378
1379 sm = kzalloc(sizeof(struct sm501_devdata), GFP_KERNEL); 1379 sm = kzalloc(sizeof(struct sm501_devdata), GFP_KERNEL);
1380 if (sm == NULL) { 1380 if (sm == NULL) {
1381 dev_err(&dev->dev, "no memory for device data\n"); 1381 dev_err(&dev->dev, "no memory for device data\n");
1382 err = -ENOMEM; 1382 ret = -ENOMEM;
1383 goto err1; 1383 goto err1;
1384 } 1384 }
1385 1385
1386 sm->dev = &dev->dev; 1386 sm->dev = &dev->dev;
1387 sm->pdev_id = dev->id; 1387 sm->pdev_id = dev->id;
1388 sm->irq = platform_get_irq(dev, 0);
1389 sm->io_res = platform_get_resource(dev, IORESOURCE_MEM, 1);
1390 sm->mem_res = platform_get_resource(dev, IORESOURCE_MEM, 0);
1391 sm->platdata = dev->dev.platform_data; 1388 sm->platdata = dev->dev.platform_data;
1392 1389
1393 if (sm->irq < 0) { 1390 ret = platform_get_irq(dev, 0);
1391 if (ret < 0) {
1394 dev_err(&dev->dev, "failed to get irq resource\n"); 1392 dev_err(&dev->dev, "failed to get irq resource\n");
1395 err = sm->irq;
1396 goto err_res; 1393 goto err_res;
1397 } 1394 }
1395 sm->irq = ret;
1398 1396
1397 sm->io_res = platform_get_resource(dev, IORESOURCE_MEM, 1);
1398 sm->mem_res = platform_get_resource(dev, IORESOURCE_MEM, 0);
1399 if (sm->io_res == NULL || sm->mem_res == NULL) { 1399 if (sm->io_res == NULL || sm->mem_res == NULL) {
1400 dev_err(&dev->dev, "failed to get IO resource\n"); 1400 dev_err(&dev->dev, "failed to get IO resource\n");
1401 err = -ENOENT; 1401 ret = -ENOENT;
1402 goto err_res; 1402 goto err_res;
1403 } 1403 }
1404 1404
@@ -1407,7 +1407,7 @@ static int sm501_plat_probe(struct platform_device *dev)
1407 1407
1408 if (sm->regs_claim == NULL) { 1408 if (sm->regs_claim == NULL) {
1409 dev_err(&dev->dev, "cannot claim registers\n"); 1409 dev_err(&dev->dev, "cannot claim registers\n");
1410 err= -EBUSY; 1410 ret = -EBUSY;
1411 goto err_res; 1411 goto err_res;
1412 } 1412 }
1413 1413
@@ -1418,7 +1418,7 @@ static int sm501_plat_probe(struct platform_device *dev)
1418 1418
1419 if (sm->regs == NULL) { 1419 if (sm->regs == NULL) {
1420 dev_err(&dev->dev, "cannot remap registers\n"); 1420 dev_err(&dev->dev, "cannot remap registers\n");
1421 err = -EIO; 1421 ret = -EIO;
1422 goto err_claim; 1422 goto err_claim;
1423 } 1423 }
1424 1424
@@ -1430,7 +1430,7 @@ static int sm501_plat_probe(struct platform_device *dev)
1430 err_res: 1430 err_res:
1431 kfree(sm); 1431 kfree(sm);
1432 err1: 1432 err1:
1433 return err; 1433 return ret;
1434 1434
1435} 1435}
1436 1436
@@ -1625,8 +1625,7 @@ static int sm501_pci_probe(struct pci_dev *dev,
1625 goto err3; 1625 goto err3;
1626 } 1626 }
1627 1627
1628 sm->regs = ioremap(pci_resource_start(dev, 1), 1628 sm->regs = pci_ioremap_bar(dev, 1);
1629 pci_resource_len(dev, 1));
1630 1629
1631 if (sm->regs == NULL) { 1630 if (sm->regs == NULL) {
1632 dev_err(&dev->dev, "cannot remap registers\n"); 1631 dev_err(&dev->dev, "cannot remap registers\n");
diff --git a/drivers/mfd/twl4030-core.c b/drivers/mfd/twl4030-core.c
index fd9a0160202c..dd843c4fbcc7 100644
--- a/drivers/mfd/twl4030-core.c
+++ b/drivers/mfd/twl4030-core.c
@@ -27,15 +27,11 @@
27 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 27 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
28 */ 28 */
29 29
30#include <linux/kernel_stat.h>
31#include <linux/init.h> 30#include <linux/init.h>
32#include <linux/mutex.h> 31#include <linux/mutex.h>
33#include <linux/interrupt.h>
34#include <linux/irq.h>
35#include <linux/random.h>
36#include <linux/kthread.h>
37#include <linux/platform_device.h> 32#include <linux/platform_device.h>
38#include <linux/clk.h> 33#include <linux/clk.h>
34#include <linux/err.h>
39 35
40#include <linux/i2c.h> 36#include <linux/i2c.h>
41#include <linux/i2c/twl4030.h> 37#include <linux/i2c/twl4030.h>
@@ -93,26 +89,6 @@
93#define twl_has_usb() false 89#define twl_has_usb() false
94#endif 90#endif
95 91
96static inline void activate_irq(int irq)
97{
98#ifdef CONFIG_ARM
99 /* ARM requires an extra step to clear IRQ_NOREQUEST, which it
100 * sets on behalf of every irq_chip. Also sets IRQ_NOPROBE.
101 */
102 set_irq_flags(irq, IRQF_VALID);
103#else
104 /* same effect on other architectures */
105 set_irq_noprobe(irq);
106#endif
107}
108
109/* Primary Interrupt Handler on TWL4030 Registers */
110
111/* Register Definitions */
112
113#define REG_PIH_ISR_P1 (0x1)
114#define REG_PIH_ISR_P2 (0x2)
115#define REG_PIH_SIR (0x3)
116 92
117/* Triton Core internal information (BEGIN) */ 93/* Triton Core internal information (BEGIN) */
118 94
@@ -175,138 +151,6 @@ static inline void activate_irq(int irq)
175 151
176/*----------------------------------------------------------------------*/ 152/*----------------------------------------------------------------------*/
177 153
178/**
179 * struct twl4030_mod_iregs - TWL module IMR/ISR regs to mask/clear at init
180 * @mod_no: TWL4030 module number (e.g., TWL4030_MODULE_GPIO)
181 * @sih_ctrl: address of module SIH_CTRL register
182 * @reg_cnt: number of IMR/ISR regs
183 * @imrs: pointer to array of TWL module interrupt mask register indices
184 * @isrs: pointer to array of TWL module interrupt status register indices
185 *
186 * Ties together TWL4030 modules and lists of IMR/ISR registers to mask/clear
187 * during twl_init_irq().
188 */
189struct twl4030_mod_iregs {
190 const u8 mod_no;
191 const u8 sih_ctrl;
192 const u8 reg_cnt;
193 const u8 *imrs;
194 const u8 *isrs;
195};
196
197/* TWL4030 INT module interrupt mask registers */
198static const u8 __initconst twl4030_int_imr_regs[] = {
199 TWL4030_INT_PWR_IMR1,
200 TWL4030_INT_PWR_IMR2,
201};
202
203/* TWL4030 INT module interrupt status registers */
204static const u8 __initconst twl4030_int_isr_regs[] = {
205 TWL4030_INT_PWR_ISR1,
206 TWL4030_INT_PWR_ISR2,
207};
208
209/* TWL4030 INTERRUPTS module interrupt mask registers */
210static const u8 __initconst twl4030_interrupts_imr_regs[] = {
211 TWL4030_INTERRUPTS_BCIIMR1A,
212 TWL4030_INTERRUPTS_BCIIMR1B,
213 TWL4030_INTERRUPTS_BCIIMR2A,
214 TWL4030_INTERRUPTS_BCIIMR2B,
215};
216
217/* TWL4030 INTERRUPTS module interrupt status registers */
218static const u8 __initconst twl4030_interrupts_isr_regs[] = {
219 TWL4030_INTERRUPTS_BCIISR1A,
220 TWL4030_INTERRUPTS_BCIISR1B,
221 TWL4030_INTERRUPTS_BCIISR2A,
222 TWL4030_INTERRUPTS_BCIISR2B,
223};
224
225/* TWL4030 MADC module interrupt mask registers */
226static const u8 __initconst twl4030_madc_imr_regs[] = {
227 TWL4030_MADC_IMR1,
228 TWL4030_MADC_IMR2,
229};
230
231/* TWL4030 MADC module interrupt status registers */
232static const u8 __initconst twl4030_madc_isr_regs[] = {
233 TWL4030_MADC_ISR1,
234 TWL4030_MADC_ISR2,
235};
236
237/* TWL4030 keypad module interrupt mask registers */
238static const u8 __initconst twl4030_keypad_imr_regs[] = {
239 TWL4030_KEYPAD_KEYP_IMR1,
240 TWL4030_KEYPAD_KEYP_IMR2,
241};
242
243/* TWL4030 keypad module interrupt status registers */
244static const u8 __initconst twl4030_keypad_isr_regs[] = {
245 TWL4030_KEYPAD_KEYP_ISR1,
246 TWL4030_KEYPAD_KEYP_ISR2,
247};
248
249/* TWL4030 GPIO module interrupt mask registers */
250static const u8 __initconst twl4030_gpio_imr_regs[] = {
251 REG_GPIO_IMR1A,
252 REG_GPIO_IMR1B,
253 REG_GPIO_IMR2A,
254 REG_GPIO_IMR2B,
255 REG_GPIO_IMR3A,
256 REG_GPIO_IMR3B,
257};
258
259/* TWL4030 GPIO module interrupt status registers */
260static const u8 __initconst twl4030_gpio_isr_regs[] = {
261 REG_GPIO_ISR1A,
262 REG_GPIO_ISR1B,
263 REG_GPIO_ISR2A,
264 REG_GPIO_ISR2B,
265 REG_GPIO_ISR3A,
266 REG_GPIO_ISR3B,
267};
268
269/* TWL4030 modules that have IMR/ISR registers that must be masked/cleared */
270static const struct twl4030_mod_iregs __initconst twl4030_mod_regs[] = {
271 {
272 .mod_no = TWL4030_MODULE_INT,
273 .sih_ctrl = TWL4030_INT_PWR_SIH_CTRL,
274 .reg_cnt = ARRAY_SIZE(twl4030_int_imr_regs),
275 .imrs = twl4030_int_imr_regs,
276 .isrs = twl4030_int_isr_regs,
277 },
278 {
279 .mod_no = TWL4030_MODULE_INTERRUPTS,
280 .sih_ctrl = TWL4030_INTERRUPTS_BCISIHCTRL,
281 .reg_cnt = ARRAY_SIZE(twl4030_interrupts_imr_regs),
282 .imrs = twl4030_interrupts_imr_regs,
283 .isrs = twl4030_interrupts_isr_regs,
284 },
285 {
286 .mod_no = TWL4030_MODULE_MADC,
287 .sih_ctrl = TWL4030_MADC_SIH_CTRL,
288 .reg_cnt = ARRAY_SIZE(twl4030_madc_imr_regs),
289 .imrs = twl4030_madc_imr_regs,
290 .isrs = twl4030_madc_isr_regs,
291 },
292 {
293 .mod_no = TWL4030_MODULE_KEYPAD,
294 .sih_ctrl = TWL4030_KEYPAD_KEYP_SIH_CTRL,
295 .reg_cnt = ARRAY_SIZE(twl4030_keypad_imr_regs),
296 .imrs = twl4030_keypad_imr_regs,
297 .isrs = twl4030_keypad_isr_regs,
298 },
299 {
300 .mod_no = TWL4030_MODULE_GPIO,
301 .sih_ctrl = REG_GPIO_SIH_CTRL,
302 .reg_cnt = ARRAY_SIZE(twl4030_gpio_imr_regs),
303 .imrs = twl4030_gpio_imr_regs,
304 .isrs = twl4030_gpio_isr_regs,
305 },
306};
307
308/*----------------------------------------------------------------*/
309
310/* is driver active, bound to a chip? */ 154/* is driver active, bound to a chip? */
311static bool inuse; 155static bool inuse;
312 156
@@ -367,33 +211,6 @@ static struct twl4030mapping twl4030_map[TWL4030_MODULE_LAST + 1] = {
367 211
368/*----------------------------------------------------------------------*/ 212/*----------------------------------------------------------------------*/
369 213
370/*
371 * TWL4030 doesn't have PIH mask, hence dummy function for mask
372 * and unmask of the (eight) interrupts reported at that level ...
373 * masking is only available from SIH (secondary) modules.
374 */
375
376static void twl4030_i2c_ackirq(unsigned int irq)
377{
378}
379
380static void twl4030_i2c_disableint(unsigned int irq)
381{
382}
383
384static void twl4030_i2c_enableint(unsigned int irq)
385{
386}
387
388static struct irq_chip twl4030_irq_chip = {
389 .name = "twl4030",
390 .ack = twl4030_i2c_ackirq,
391 .mask = twl4030_i2c_disableint,
392 .unmask = twl4030_i2c_enableint,
393};
394
395/*----------------------------------------------------------------------*/
396
397/* Exported Functions */ 214/* Exported Functions */
398 215
399/** 216/**
@@ -535,108 +352,11 @@ EXPORT_SYMBOL(twl4030_i2c_read_u8);
535 352
536/*----------------------------------------------------------------------*/ 353/*----------------------------------------------------------------------*/
537 354
538static unsigned twl4030_irq_base;
539
540static struct completion irq_event;
541
542/*
543 * This thread processes interrupts reported by the Primary Interrupt Handler.
544 */
545static int twl4030_irq_thread(void *data)
546{
547 long irq = (long)data;
548 irq_desc_t *desc = irq_desc + irq;
549 static unsigned i2c_errors;
550 const static unsigned max_i2c_errors = 100;
551
552 current->flags |= PF_NOFREEZE;
553
554 while (!kthread_should_stop()) {
555 int ret;
556 int module_irq;
557 u8 pih_isr;
558
559 /* Wait for IRQ, then read PIH irq status (also blocking) */
560 wait_for_completion_interruptible(&irq_event);
561
562 ret = twl4030_i2c_read_u8(TWL4030_MODULE_PIH, &pih_isr,
563 REG_PIH_ISR_P1);
564 if (ret) {
565 pr_warning("%s: I2C error %d reading PIH ISR\n",
566 DRIVER_NAME, ret);
567 if (++i2c_errors >= max_i2c_errors) {
568 printk(KERN_ERR "Maximum I2C error count"
569 " exceeded. Terminating %s.\n",
570 __func__);
571 break;
572 }
573 complete(&irq_event);
574 continue;
575 }
576
577 /* these handlers deal with the relevant SIH irq status */
578 local_irq_disable();
579 for (module_irq = twl4030_irq_base;
580 pih_isr;
581 pih_isr >>= 1, module_irq++) {
582 if (pih_isr & 0x1) {
583 irq_desc_t *d = irq_desc + module_irq;
584
585 d->handle_irq(module_irq, d);
586 }
587 }
588 local_irq_enable();
589
590 desc->chip->unmask(irq);
591 }
592
593 return 0;
594}
595
596/* 355/*
597 * do_twl4030_irq() is the desc->handle method for the twl4030 interrupt. 356 * NOTE: We know the first 8 IRQs after pdata->base_irq are
598 * This is a chained interrupt, so there is no desc->action method for it. 357 * for the PIH, and the next are for the PWR_INT SIH, since
599 * Now we need to query the interrupt controller in the twl4030 to determine 358 * that's how twl_init_irq() sets things up.
600 * which module is generating the interrupt request. However, we can't do i2c
601 * transactions in interrupt context, so we must defer that work to a kernel
602 * thread. All we do here is acknowledge and mask the interrupt and wakeup
603 * the kernel thread.
604 */ 359 */
605static void do_twl4030_irq(unsigned int irq, irq_desc_t *desc)
606{
607 const unsigned int cpu = smp_processor_id();
608
609 /*
610 * Earlier this was desc->triggered = 1;
611 */
612 desc->status |= IRQ_LEVEL;
613
614 /*
615 * Acknowledge, clear _AND_ disable the interrupt.
616 */
617 desc->chip->ack(irq);
618
619 if (!desc->depth) {
620 kstat_cpu(cpu).irqs[irq]++;
621
622 complete(&irq_event);
623 }
624}
625
626static struct task_struct * __init start_twl4030_irq_thread(long irq)
627{
628 struct task_struct *thread;
629
630 init_completion(&irq_event);
631 thread = kthread_run(twl4030_irq_thread, (void *)irq, "twl4030-irq");
632 if (!thread)
633 pr_err("%s: could not create twl4030 irq %ld thread!\n",
634 DRIVER_NAME, irq);
635
636 return thread;
637}
638
639/*----------------------------------------------------------------------*/
640 360
641static int add_children(struct twl4030_platform_data *pdata) 361static int add_children(struct twl4030_platform_data *pdata)
642{ 362{
@@ -668,7 +388,7 @@ static int add_children(struct twl4030_platform_data *pdata)
668 388
669 if (status == 0) { 389 if (status == 0) {
670 struct resource r = { 390 struct resource r = {
671 .start = TWL4030_PWRIRQ_CHG_PRES, 391 .start = pdata->irq_base + 8 + 1,
672 .flags = IORESOURCE_IRQ, 392 .flags = IORESOURCE_IRQ,
673 }; 393 };
674 394
@@ -817,8 +537,7 @@ static int add_children(struct twl4030_platform_data *pdata)
817 /* RTC module IRQ */ 537 /* RTC module IRQ */
818 if (status == 0) { 538 if (status == 0) {
819 struct resource r = { 539 struct resource r = {
820 /* REVISIT don't hard-wire this stuff */ 540 .start = pdata->irq_base + 8 + 3,
821 .start = TWL4030_PWRIRQ_RTC,
822 .flags = IORESOURCE_IRQ, 541 .flags = IORESOURCE_IRQ,
823 }; 542 };
824 543
@@ -863,7 +582,7 @@ static int add_children(struct twl4030_platform_data *pdata)
863 582
864 if (status == 0) { 583 if (status == 0) {
865 struct resource r = { 584 struct resource r = {
866 .start = TWL4030_PWRIRQ_USB_PRES, 585 .start = pdata->irq_base + 8 + 2,
867 .flags = IORESOURCE_IRQ, 586 .flags = IORESOURCE_IRQ,
868 }; 587 };
869 588
@@ -965,123 +684,17 @@ static void __init clocks_init(void)
965 684
966/*----------------------------------------------------------------------*/ 685/*----------------------------------------------------------------------*/
967 686
968/** 687int twl_init_irq(int irq_num, unsigned irq_base, unsigned irq_end);
969 * twl4030_i2c_clear_isr - clear TWL4030 SIH ISR regs via read + write 688int twl_exit_irq(void);
970 * @mod_no: TWL4030 module number
971 * @reg: register index to clear
972 * @cor: value of the <module>_SIH_CTRL.COR bit (1 or 0)
973 *
974 * Either reads (cor == 1) or writes (cor == 0) to a TWL4030 interrupt
975 * status register to ensure that any prior interrupts are cleared.
976 * Returns the status from the I2C read operation.
977 */
978static int __init twl4030_i2c_clear_isr(u8 mod_no, u8 reg, u8 cor)
979{
980 u8 tmp;
981
982 return (cor) ? twl4030_i2c_read_u8(mod_no, &tmp, reg) :
983 twl4030_i2c_write_u8(mod_no, 0xff, reg);
984}
985
986/**
987 * twl4030_read_cor_bit - are TWL module ISRs cleared by reads or writes?
988 * @mod_no: TWL4030 module number
989 * @reg: register index to clear
990 *
991 * Returns 1 if the TWL4030 SIH interrupt status registers (ISRs) for
992 * the specified TWL module are cleared by reads, or 0 if cleared by
993 * writes.
994 */
995static int twl4030_read_cor_bit(u8 mod_no, u8 reg)
996{
997 u8 tmp = 0;
998
999 WARN_ON(twl4030_i2c_read_u8(mod_no, &tmp, reg) < 0);
1000
1001 tmp &= TWL4030_SIH_CTRL_COR_MASK;
1002 tmp >>= __ffs(TWL4030_SIH_CTRL_COR_MASK);
1003
1004 return tmp;
1005}
1006
1007/**
1008 * twl4030_mask_clear_intrs - mask and clear all TWL4030 interrupts
1009 * @t: pointer to twl4030_mod_iregs array
1010 * @t_sz: ARRAY_SIZE(t) (starting at 1)
1011 *
1012 * Mask all TWL4030 interrupt mask registers (IMRs) and clear all
1013 * interrupt status registers (ISRs). No return value, but will WARN if
1014 * any I2C operations fail.
1015 */
1016static void __init twl4030_mask_clear_intrs(const struct twl4030_mod_iregs *t,
1017 const u8 t_sz)
1018{
1019 int i, j;
1020
1021 /*
1022 * N.B. - further efficiency is possible here. Eight I2C
1023 * operations on BCI and GPIO modules are avoidable if I2C
1024 * burst read/write transactions were implemented. Would
1025 * probably save about 1ms of boot time and a small amount of
1026 * power.
1027 */
1028 for (i = 0; i < t_sz; i++) {
1029 const struct twl4030_mod_iregs tmr = t[i];
1030 int cor;
1031
1032 /* Are ISRs cleared by reads or writes? */
1033 cor = twl4030_read_cor_bit(tmr.mod_no, tmr.sih_ctrl);
1034
1035 for (j = 0; j < tmr.reg_cnt; j++) {
1036
1037 /* Mask interrupts at the TWL4030 */
1038 WARN_ON(twl4030_i2c_write_u8(tmr.mod_no, 0xff,
1039 tmr.imrs[j]) < 0);
1040
1041 /* Clear TWL4030 ISRs */
1042 WARN_ON(twl4030_i2c_clear_isr(tmr.mod_no,
1043 tmr.isrs[j], cor) < 0);
1044 }
1045 }
1046}
1047
1048
1049static void twl_init_irq(int irq_num, unsigned irq_base, unsigned irq_end)
1050{
1051 int i;
1052
1053 /*
1054 * Mask and clear all TWL4030 interrupts since initially we do
1055 * not have any TWL4030 module interrupt handlers present
1056 */
1057 twl4030_mask_clear_intrs(twl4030_mod_regs,
1058 ARRAY_SIZE(twl4030_mod_regs));
1059
1060 twl4030_irq_base = irq_base;
1061
1062 /* install an irq handler for each of the PIH modules */
1063 for (i = irq_base; i < irq_end; i++) {
1064 set_irq_chip_and_handler(i, &twl4030_irq_chip,
1065 handle_simple_irq);
1066 activate_irq(i);
1067 }
1068
1069 /* install an irq handler to demultiplex the TWL4030 interrupt */
1070 set_irq_data(irq_num, start_twl4030_irq_thread(irq_num));
1071 set_irq_chained_handler(irq_num, do_twl4030_irq);
1072}
1073
1074/*----------------------------------------------------------------------*/
1075 689
1076static int twl4030_remove(struct i2c_client *client) 690static int twl4030_remove(struct i2c_client *client)
1077{ 691{
1078 unsigned i; 692 unsigned i;
693 int status;
1079 694
1080 /* FIXME undo twl_init_irq() */ 695 status = twl_exit_irq();
1081 if (twl4030_irq_base) { 696 if (status < 0)
1082 dev_err(&client->dev, "can't yet clean up IRQs?\n"); 697 return status;
1083 return -ENOSYS;
1084 }
1085 698
1086 for (i = 0; i < TWL4030_NUM_SLAVES; i++) { 699 for (i = 0; i < TWL4030_NUM_SLAVES; i++) {
1087 struct twl4030_client *twl = &twl4030_modules[i]; 700 struct twl4030_client *twl = &twl4030_modules[i];
@@ -1112,7 +725,7 @@ twl4030_probe(struct i2c_client *client, const struct i2c_device_id *id)
1112 return -EIO; 725 return -EIO;
1113 } 726 }
1114 727
1115 if (inuse || twl4030_irq_base) { 728 if (inuse) {
1116 dev_dbg(&client->dev, "driver is already in use\n"); 729 dev_dbg(&client->dev, "driver is already in use\n");
1117 return -EBUSY; 730 return -EBUSY;
1118 } 731 }
@@ -1146,9 +759,9 @@ twl4030_probe(struct i2c_client *client, const struct i2c_device_id *id)
1146 if (client->irq 759 if (client->irq
1147 && pdata->irq_base 760 && pdata->irq_base
1148 && pdata->irq_end > pdata->irq_base) { 761 && pdata->irq_end > pdata->irq_base) {
1149 twl_init_irq(client->irq, pdata->irq_base, pdata->irq_end); 762 status = twl_init_irq(client->irq, pdata->irq_base, pdata->irq_end);
1150 dev_info(&client->dev, "IRQ %d chains IRQs %d..%d\n", 763 if (status < 0)
1151 client->irq, pdata->irq_base, pdata->irq_end - 1); 764 goto fail;
1152 } 765 }
1153 766
1154 status = add_children(pdata); 767 status = add_children(pdata);
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
new file mode 100644
index 000000000000..fae868a8d499
--- /dev/null
+++ b/drivers/mfd/twl4030-irq.c
@@ -0,0 +1,743 @@
1/*
2 * twl4030-irq.c - TWL4030/TPS659x0 irq support
3 *
4 * Copyright (C) 2005-2006 Texas Instruments, Inc.
5 *
6 * Modifications to defer interrupt handling to a kernel thread:
7 * Copyright (C) 2006 MontaVista Software, Inc.
8 *
9 * Based on tlv320aic23.c:
10 * Copyright (c) by Kai Svahn <kai.svahn@nokia.com>
11 *
12 * Code cleanup and modifications to IRQ handler.
13 * by syed khasim <x0khasim@ti.com>
14 *
15 * This program is free software; you can redistribute it and/or modify
16 * it under the terms of the GNU General Public License as published by
17 * the Free Software Foundation; either version 2 of the License, or
18 * (at your option) any later version.
19 *
20 * This program is distributed in the hope that it will be useful,
21 * but WITHOUT ANY WARRANTY; without even the implied warranty of
22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
23 * GNU General Public License for more details.
24 *
25 * You should have received a copy of the GNU General Public License
26 * along with this program; if not, write to the Free Software
27 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
28 */
29
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kthread.h>

#include <linux/i2c/twl4030.h>
36
37
38/*
39 * TWL4030 IRQ handling has two stages in hardware, and thus in software.
40 * The Primary Interrupt Handler (PIH) stage exposes status bits saying
41 * which Secondary Interrupt Handler (SIH) stage is raising an interrupt.
42 * SIH modules are more traditional IRQ components, which support per-IRQ
43 * enable/disable and trigger controls; they do most of the work.
44 *
45 * These chips are designed to support IRQ handling from two different
46 * I2C masters. Each has a dedicated IRQ line, and dedicated IRQ status
47 * and mask registers in the PIH and SIH modules.
48 *
49 * We set up IRQs starting at a platform-specified base, always starting
50 * with PIH and the SIH for PWR_INT and then usually adding GPIO:
51 * base + 0 .. base + 7 PIH
52 * base + 8 .. base + 15 SIH for PWR_INT
53 * base + 16 .. base + 33 SIH for GPIO
54 */
55
/* PIH register offsets */
#define REG_PIH_ISR_P1			0x01
#define REG_PIH_ISR_P2			0x02
#define REG_PIH_SIR			0x03	/* for testing */


/* Linux could (eventually) use either IRQ line */
static int irq_line;

/*
 * Describes one Secondary Interrupt Handler (SIH) module:  where its
 * control, status, mask and edge registers live, and how wide they are.
 * One descriptor per PIH status bit (see sih_modules[] below).
 */
struct sih {
	char	name[8];
	u8	module;			/* module id */
	u8	control_offset;		/* for SIH_CTRL */
	bool	set_cor;		/* module supports clear-on-read ISRs */

	u8	bits;			/* valid in isr/imr */
	u8	bytes_ixr;		/* bytelen of ISR/IMR/SIR */

	u8	edr_offset;
	u8	bytes_edr;		/* bytelen of EDR */

	/* SIR ignored -- set interrupt, for testing only */
	struct irq_data {
		u8	isr_offset;
		u8	imr_offset;
	} mask[2];			/* one entry per I2C/IRQ line */
	/* + 2 bytes padding */
};
84
/*
 * Fills in the register-layout fields of a struct sih for modules that
 * follow the regular TWL4030_<modname>_* register naming scheme.
 * Note the EDR length: two trigger bits (rising/falling) per IRQ.
 */
#define SIH_INITIALIZER(modname, nbits) \
	.module		= TWL4030_MODULE_ ## modname, \
	.control_offset = TWL4030_ ## modname ## _SIH_CTRL, \
	.bits		= nbits, \
	.bytes_ixr	= DIV_ROUND_UP(nbits, 8), \
	.edr_offset	= TWL4030_ ## modname ## _EDR, \
	.bytes_edr	= DIV_ROUND_UP((2*(nbits)), 8), \
	.mask = { { \
		.isr_offset	= TWL4030_ ## modname ## _ISR1, \
		.imr_offset	= TWL4030_ ## modname ## _IMR1, \
	}, \
	{ \
		.isr_offset	= TWL4030_ ## modname ## _ISR2, \
		.imr_offset	= TWL4030_ ## modname ## _IMR2, \
	}, },
100
/* register naming policies are inconsistent ... these aliases let the
 * SIH_INITIALIZER() token-pasting work for the odd ones out; they are
 * #undef'd again right after the table.
 */
#define TWL4030_INT_PWR_EDR		TWL4030_INT_PWR_EDR1
#define TWL4030_MODULE_KEYPAD_KEYP	TWL4030_MODULE_KEYPAD
#define TWL4030_MODULE_INT_PWR		TWL4030_MODULE_INT


/* Order in this table matches order in PIH_ISR.  That is,
 * BIT(n) in PIH_ISR is sih_modules[n].
 */
static const struct sih sih_modules[6] = {
	[0] = {
		.name		= "gpio",
		.module		= TWL4030_MODULE_GPIO,
		.control_offset	= REG_GPIO_SIH_CTRL,
		.set_cor	= true,
		.bits		= TWL4030_GPIO_MAX,
		.bytes_ixr	= 3,
		/* Note: *all* of these IRQs default to no-trigger */
		.edr_offset	= REG_GPIO_EDR1,
		.bytes_edr	= 5,
		.mask = { {
			.isr_offset	= REG_GPIO_ISR1A,
			.imr_offset	= REG_GPIO_IMR1A,
		}, {
			.isr_offset	= REG_GPIO_ISR1B,
			.imr_offset	= REG_GPIO_IMR1B,
		}, },
	},
	[1] = {
		.name		= "keypad",
		.set_cor	= true,
		SIH_INITIALIZER(KEYPAD_KEYP, 4)
	},
	[2] = {
		.name		= "bci",
		.module		= TWL4030_MODULE_INTERRUPTS,
		.control_offset	= TWL4030_INTERRUPTS_BCISIHCTRL,
		.bits		= 12,
		.bytes_ixr	= 2,
		.edr_offset	= TWL4030_INTERRUPTS_BCIEDR1,
		/* Note: most of these IRQs default to no-trigger */
		.bytes_edr	= 3,
		.mask = { {
			.isr_offset	= TWL4030_INTERRUPTS_BCIISR1A,
			.imr_offset	= TWL4030_INTERRUPTS_BCIIMR1A,
		}, {
			.isr_offset	= TWL4030_INTERRUPTS_BCIISR1B,
			.imr_offset	= TWL4030_INTERRUPTS_BCIIMR1B,
		}, },
	},
	[3] = {
		.name		= "madc",
		SIH_INITIALIZER(MADC, 4)
	},
	[4] = {
		/* USB doesn't use the same SIH organization */
		.name		= "usb",
	},
	[5] = {
		.name		= "power",
		.set_cor	= true,
		SIH_INITIALIZER(INT_PWR, 8)
	},
	/* there are no SIH modules #6 or #7 ... */
};

#undef TWL4030_MODULE_KEYPAD_KEYP
#undef TWL4030_MODULE_INT_PWR
#undef TWL4030_INT_PWR_EDR
170
171/*----------------------------------------------------------------------*/
172
173static unsigned twl4030_irq_base;
174
175static struct completion irq_event;
176
177/*
178 * This thread processes interrupts reported by the Primary Interrupt Handler.
179 */
180static int twl4030_irq_thread(void *data)
181{
182 long irq = (long)data;
183 irq_desc_t *desc = irq_desc + irq;
184 static unsigned i2c_errors;
185 const static unsigned max_i2c_errors = 100;
186
187 current->flags |= PF_NOFREEZE;
188
189 while (!kthread_should_stop()) {
190 int ret;
191 int module_irq;
192 u8 pih_isr;
193
194 /* Wait for IRQ, then read PIH irq status (also blocking) */
195 wait_for_completion_interruptible(&irq_event);
196
197 ret = twl4030_i2c_read_u8(TWL4030_MODULE_PIH, &pih_isr,
198 REG_PIH_ISR_P1);
199 if (ret) {
200 pr_warning("twl4030: I2C error %d reading PIH ISR\n",
201 ret);
202 if (++i2c_errors >= max_i2c_errors) {
203 printk(KERN_ERR "Maximum I2C error count"
204 " exceeded. Terminating %s.\n",
205 __func__);
206 break;
207 }
208 complete(&irq_event);
209 continue;
210 }
211
212 /* these handlers deal with the relevant SIH irq status */
213 local_irq_disable();
214 for (module_irq = twl4030_irq_base;
215 pih_isr;
216 pih_isr >>= 1, module_irq++) {
217 if (pih_isr & 0x1) {
218 irq_desc_t *d = irq_desc + module_irq;
219
220 /* These can't be masked ... always warn
221 * if we get any surprises.
222 */
223 if (d->status & IRQ_DISABLED)
224 note_interrupt(module_irq, d,
225 IRQ_NONE);
226 else
227 d->handle_irq(module_irq, d);
228 }
229 }
230 local_irq_enable();
231
232 desc->chip->unmask(irq);
233 }
234
235 return 0;
236}
237
/*
 * handle_twl4030_pih() is the desc->handle method for the twl4030 interrupt.
 * This is a chained interrupt, so there is no desc->action method for it.
 * Now we need to query the interrupt controller in the twl4030 to determine
 * which module is generating the interrupt request.  However, we can't do i2c
 * transactions in interrupt context, so we must defer that work to a kernel
 * thread.  All we do here is acknowledge and mask the interrupt and wakeup
 * the kernel thread (twl4030_irq_thread, via the irq_event completion).
 */
static void handle_twl4030_pih(unsigned int irq, irq_desc_t *desc)
{
	/* Acknowledge, clear *AND* mask the interrupt... */
	desc->chip->ack(irq);
	complete(&irq_event);
}
253
254static struct task_struct *start_twl4030_irq_thread(long irq)
255{
256 struct task_struct *thread;
257
258 init_completion(&irq_event);
259 thread = kthread_run(twl4030_irq_thread, (void *)irq, "twl4030-irq");
260 if (!thread)
261 pr_err("twl4030: could not create irq %ld thread!\n", irq);
262
263 return thread;
264}
265
266/*----------------------------------------------------------------------*/
267
/*
 * twl4030_init_sih_modules() ... start from a known state where no
 * IRQs will be coming in, and where we can quickly enable them then
 * handle them as they arrive.  Mask all IRQs: maybe init SIH_CTRL.
 *
 * @line: which chip IRQ line we service (0 == int1_n, 1 == int2_n);
 *	  saved in the file-scope irq_line for later ISR/IMR accesses.
 * Returns 0, or -EINVAL for a bad line.  Per-module I2C errors are
 * logged but do not abort the init.
 *
 * NOTE:  we don't touch EDR registers here; they stay with hardware
 * defaults or whatever the last value was.  Note that when both EDR
 * bits for an IRQ are clear, that's as if its IMR bit is set...
 */
static int twl4030_init_sih_modules(unsigned line)
{
	const struct sih *sih;
	u8 buf[4];
	int i;
	int status;

	/* line 0 == int1_n signal; line 1 == int2_n signal */
	if (line > 1)
		return -EINVAL;

	irq_line = line;

	/* disable all interrupts on our line */
	memset(buf, 0xff, sizeof buf);
	sih = sih_modules;
	for (i = 0; i < ARRAY_SIZE(sih_modules); i++, sih++) {

		/* skip USB -- it's funky */
		if (!sih->bytes_ixr)
			continue;

		/* all-ones IMR == every source in this module masked */
		status = twl4030_i2c_write(sih->module, buf,
				sih->mask[line].imr_offset, sih->bytes_ixr);
		if (status < 0)
			pr_err("twl4030: err %d initializing %s %s\n",
					status, sih->name, "IMR");

		/* Maybe disable "exclusive" mode; buffer second pending irq;
		 * set Clear-On-Read (COR) bit.
		 *
		 * NOTE that sometimes COR polarity is documented as being
		 * inverted:  for MADC and BCI, COR=1 means "clear on write".
		 * And for PWR_INT it's not documented...
		 */
		if (sih->set_cor) {
			status = twl4030_i2c_write_u8(sih->module,
					TWL4030_SIH_CTRL_COR_MASK,
					sih->control_offset);
			if (status < 0)
				pr_err("twl4030: err %d initializing %s %s\n",
						status, sih->name, "SIH_CTRL");
		}
	}

	sih = sih_modules;
	for (i = 0; i < ARRAY_SIZE(sih_modules); i++, sih++) {
		u8 rxbuf[4];
		int j;

		/* skip USB */
		if (!sih->bytes_ixr)
			continue;

		/* Clear pending interrupt status.  Either the read was
		 * enough, or we need to write those bits.  Repeat, in
		 * case an IRQ is pending (PENDDIS=0) ... that's not
		 * uncommon with PWR_INT.PWRON.
		 */
		for (j = 0; j < 2; j++) {
			status = twl4030_i2c_read(sih->module, rxbuf,
				sih->mask[line].isr_offset, sih->bytes_ixr);
			if (status < 0)
				pr_err("twl4030: err %d initializing %s %s\n",
					status, sih->name, "ISR");

			/* buf is still all-ones from the IMR pass */
			if (!sih->set_cor)
				status = twl4030_i2c_write(sih->module, buf,
					sih->mask[line].isr_offset,
					sih->bytes_ixr);
			/* else COR=1 means read sufficed.
			 * (for most SIH modules...)
			 */
		}
	}

	return 0;
}
355
/* Make a freshly configured IRQ requestable by drivers. */
static inline void activate_irq(int irq)
{
#ifdef CONFIG_ARM
	/* ARM requires an extra step to clear IRQ_NOREQUEST, which it
	 * sets on behalf of every irq_chip.  Also sets IRQ_NOPROBE.
	 */
	set_irq_flags(irq, IRQF_VALID);
#else
	/* same effect on other architectures */
	set_irq_noprobe(irq);
#endif
}
368
369/*----------------------------------------------------------------------*/
370
/* protects every sih_agent's imr/edge_change shadow state */
static DEFINE_SPINLOCK(sih_agent_lock);

/* single-threaded workqueue; runs the deferred SIH register I/O */
static struct workqueue_struct *wq;

/* Per-SIH-bank state:  the Linux IRQ range it owns, plus shadow
 * copies of the mask and edge settings that the work handlers below
 * push out to the chip over I2C.
 */
struct sih_agent {
	int			irq_base;
	const struct sih	*sih;

	u32			imr;	/* shadow of the module's IMR */
	bool			imr_change_pending;
	struct work_struct	mask_work;

	u32			edge_change;	/* IRQs needing EDR update */
	struct work_struct	edge_work;
};

/*
 * Work handler:  write the shadow interrupt mask out to the chip's
 * IMR registers.  Runs in task context because the I2C write sleeps.
 */
static void twl4030_sih_do_mask(struct work_struct *work)
{
	struct sih_agent *agent;
	const struct sih *sih;
	union {
		u8	bytes[4];
		u32	word;
	} imr;
	int status;

	agent = container_of(work, struct sih_agent, mask_work);

	/* see what work we have */
	spin_lock_irq(&sih_agent_lock);
	if (agent->imr_change_pending) {
		sih = agent->sih;
		/* byte[0] gets overwritten as we write ... */
		imr.word = cpu_to_le32(agent->imr << 8);
		agent->imr_change_pending = false;
	} else
		sih = NULL;
	spin_unlock_irq(&sih_agent_lock);
	if (!sih)
		return;

	/* write the whole mask ... simpler than subsetting it */
	status = twl4030_i2c_write(sih->module, imr.bytes,
			sih->mask[irq_line].imr_offset, sih->bytes_ixr);
	if (status)
		pr_err("twl4030: %s, %s --> %d\n", __func__,
				"write", status);
}
419
420static void twl4030_sih_do_edge(struct work_struct *work)
421{
422 struct sih_agent *agent;
423 const struct sih *sih;
424 u8 bytes[6];
425 u32 edge_change;
426 int status;
427
428 agent = container_of(work, struct sih_agent, edge_work);
429
430 /* see what work we have */
431 spin_lock_irq(&sih_agent_lock);
432 edge_change = agent->edge_change;
433 agent->edge_change = 0;;
434 sih = edge_change ? agent->sih : NULL;
435 spin_unlock_irq(&sih_agent_lock);
436 if (!sih)
437 return;
438
439 /* Read, reserving first byte for write scratch. Yes, this
440 * could be cached for some speedup ... but be careful about
441 * any processor on the other IRQ line, EDR registers are
442 * shared.
443 */
444 status = twl4030_i2c_read(sih->module, bytes + 1,
445 sih->edr_offset, sih->bytes_edr);
446 if (status) {
447 pr_err("twl4030: %s, %s --> %d\n", __func__,
448 "read", status);
449 return;
450 }
451
452 /* Modify only the bits we know must change */
453 while (edge_change) {
454 int i = fls(edge_change) - 1;
455 struct irq_desc *d = irq_desc + i + agent->irq_base;
456 int byte = 1 + (i >> 2);
457 int off = (i & 0x3) * 2;
458
459 bytes[byte] &= ~(0x03 << off);
460
461 spin_lock_irq(&d->lock);
462 if (d->status & IRQ_TYPE_EDGE_RISING)
463 bytes[byte] |= BIT(off + 1);
464 if (d->status & IRQ_TYPE_EDGE_FALLING)
465 bytes[byte] |= BIT(off + 0);
466 spin_unlock_irq(&d->lock);
467
468 edge_change &= ~BIT(i);
469 }
470
471 /* Write */
472 status = twl4030_i2c_write(sih->module, bytes,
473 sih->edr_offset, sih->bytes_edr);
474 if (status)
475 pr_err("twl4030: %s, %s --> %d\n", __func__,
476 "write", status);
477}
478
479/*----------------------------------------------------------------------*/
480
481/*
482 * All irq_chip methods get issued from code holding irq_desc[irq].lock,
483 * which can't perform the underlying I2C operations (because they sleep).
484 * So we must hand them off to a thread (workqueue) and cope with asynch
485 * completion, potentially including some re-ordering, of these requests.
486 */
487
488static void twl4030_sih_mask(unsigned irq)
489{
490 struct sih_agent *sih = get_irq_chip_data(irq);
491 unsigned long flags;
492
493 spin_lock_irqsave(&sih_agent_lock, flags);
494 sih->imr |= BIT(irq - sih->irq_base);
495 sih->imr_change_pending = true;
496 queue_work(wq, &sih->mask_work);
497 spin_unlock_irqrestore(&sih_agent_lock, flags);
498}
499
500static void twl4030_sih_unmask(unsigned irq)
501{
502 struct sih_agent *sih = get_irq_chip_data(irq);
503 unsigned long flags;
504
505 spin_lock_irqsave(&sih_agent_lock, flags);
506 sih->imr &= ~BIT(irq - sih->irq_base);
507 sih->imr_change_pending = true;
508 queue_work(wq, &sih->mask_work);
509 spin_unlock_irqrestore(&sih_agent_lock, flags);
510}
511
/*
 * irq_chip .set_type:  only edge triggers are supported.  The EDR
 * register write is deferred to the workqueue (twl4030_sih_do_edge)
 * since I2C sleeps; here we only update desc->status and mark the
 * IRQ's bit in edge_change.
 */
static int twl4030_sih_set_type(unsigned irq, unsigned trigger)
{
	struct sih_agent *sih = get_irq_chip_data(irq);
	struct irq_desc *desc = irq_desc + irq;
	unsigned long flags;

	if (trigger & ~(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
		return -EINVAL;

	spin_lock_irqsave(&sih_agent_lock, flags);
	/* only queue work when the trigger actually changes */
	if ((desc->status & IRQ_TYPE_SENSE_MASK) != trigger) {
		desc->status &= ~IRQ_TYPE_SENSE_MASK;
		desc->status |= trigger;
		sih->edge_change |= BIT(irq - sih->irq_base);
		queue_work(wq, &sih->edge_work);
	}
	spin_unlock_irqrestore(&sih_agent_lock, flags);
	return 0;
}
531
/* irq_chip shared by all SIH IRQs; .ack is assigned from
 * dummy_irq_chip in twl_init_irq().
 */
static struct irq_chip twl4030_sih_irq_chip = {
	.name		= "twl4030",
	.mask		= twl4030_sih_mask,
	.unmask		= twl4030_sih_unmask,
	.set_type	= twl4030_sih_set_type,
};
538
539/*----------------------------------------------------------------------*/
540
541static inline int sih_read_isr(const struct sih *sih)
542{
543 int status;
544 union {
545 u8 bytes[4];
546 u32 word;
547 } isr;
548
549 /* FIXME need retry-on-error ... */
550
551 isr.word = 0;
552 status = twl4030_i2c_read(sih->module, isr.bytes,
553 sih->mask[irq_line].isr_offset, sih->bytes_ixr);
554
555 return (status < 0) ? status : le32_to_cpu(isr.word);
556}
557
/*
 * Generic handler for SIH interrupts ... we "know" this is called
 * in task context, with IRQs enabled.
 */
static void handle_twl4030_sih(unsigned irq, struct irq_desc *desc)
{
	struct sih_agent *agent = get_irq_data(irq);
	const struct sih *sih = agent->sih;
	int isr;

	/* reading ISR acks the IRQs, using clear-on-read mode */
	/* the caller (twl4030_irq_thread) runs us with IRQs disabled;
	 * re-enable around the sleeping I2C read
	 */
	local_irq_enable();
	isr = sih_read_isr(sih);
	local_irq_disable();

	if (isr < 0) {
		pr_err("twl4030: %s SIH, read ISR error %d\n",
			sih->name, isr);
		/* REVISIT:  recover; eventually mask it all, etc */
		return;
	}

	/* dispatch each set status bit, highest first, to its chained IRQ */
	while (isr) {
		irq = fls(isr);
		irq--;
		isr &= ~BIT(irq);

		if (irq < sih->bits)
			generic_handle_irq(agent->irq_base + irq);
		else
			pr_err("twl4030: %s SIH, invalid ISR bit %d\n",
				sih->name, irq);
	}
}
592
/* next unassigned Linux IRQ number, after the PIH block */
static unsigned twl4030_irq_next;

/* returns the first IRQ used by this SIH bank,
 * or negative errno
 */
int twl4030_sih_setup(int module)
{
	int			sih_mod;
	const struct sih	*sih = NULL;
	struct sih_agent	*agent;
	int			i, irq;
	int			status = -EINVAL;
	unsigned		irq_base = twl4030_irq_next;

	/* only support modules with standard clear-on-read for now */
	for (sih_mod = 0, sih = sih_modules;
			sih_mod < ARRAY_SIZE(sih_modules);
			sih_mod++, sih++) {
		if (sih->module == module && sih->set_cor) {
			if (!WARN((irq_base + sih->bits) > NR_IRQS,
					"irq %d for %s too big\n",
					irq_base + sih->bits,
					sih->name))
				status = 0;
			break;
		}
	}
	if (status < 0)
		return status;

	agent = kzalloc(sizeof *agent, GFP_KERNEL);
	if (!agent)
		return -ENOMEM;

	status = 0;

	agent->irq_base = irq_base;
	agent->sih = sih;
	agent->imr = ~0;	/* everything masked until unmask() */
	INIT_WORK(&agent->mask_work, twl4030_sih_do_mask);
	INIT_WORK(&agent->edge_work, twl4030_sih_do_edge);

	/* one edge-triggered Linux IRQ per SIH status bit */
	for (i = 0; i < sih->bits; i++) {
		irq = irq_base + i;

		set_irq_chip_and_handler(irq, &twl4030_sih_irq_chip,
				handle_edge_irq);
		set_irq_chip_data(irq, agent);
		activate_irq(irq);
	}

	status = irq_base;
	twl4030_irq_next += i;

	/* replace generic PIH handler (handle_simple_irq) */
	irq = sih_mod + twl4030_irq_base;
	set_irq_data(irq, agent);
	set_irq_chained_handler(irq, handle_twl4030_sih);

	pr_info("twl4030: %s (irq %d) chaining IRQs %d..%d\n", sih->name,
			irq, irq_base, twl4030_irq_next - 1);

	return status;
}
657
658/* FIXME need a call to reverse twl4030_sih_setup() ... */
659
660
661/*----------------------------------------------------------------------*/
662
/* FIXME pass in which interrupt line we'll use ... */
#define twl_irq_line	0

/*
 * twl_init_irq() - set up the whole TWL4030 IRQ chain.
 * @irq_num:  processor IRQ wired to the chip's interrupt line
 * @irq_base, @irq_end:  Linux IRQ number range to expose
 *
 * Masks and clears all chip interrupts, installs a PIH handler for
 * each IRQ in the range, sets up the PWR_INT SIH bank, and starts
 * the demux thread.  Returns a negative errno on failure (tearing
 * down the handlers and workqueue again); on success the return is
 * >= 0 -- callers only test for < 0.
 */
int twl_init_irq(int irq_num, unsigned irq_base, unsigned irq_end)
{
	static struct irq_chip	twl4030_irq_chip;

	int			status;
	int			i;
	struct task_struct	*task;

	/*
	 * Mask and clear all TWL4030 interrupts since initially we do
	 * not have any TWL4030 module interrupt handlers present
	 */
	status = twl4030_init_sih_modules(twl_irq_line);
	if (status < 0)
		return status;

	wq = create_singlethread_workqueue("twl4030-irqchip");
	if (!wq) {
		pr_err("twl4030: workqueue FAIL\n");
		return -ESRCH;
	}

	twl4030_irq_base = irq_base;

	/* install an irq handler for each of the SIH modules;
	 * clone dummy irq_chip since PIH can't *do* anything
	 */
	twl4030_irq_chip = dummy_irq_chip;
	twl4030_irq_chip.name = "twl4030";

	twl4030_sih_irq_chip.ack = dummy_irq_chip.ack;

	for (i = irq_base; i < irq_end; i++) {
		set_irq_chip_and_handler(i, &twl4030_irq_chip,
				handle_simple_irq);
		activate_irq(i);
	}
	twl4030_irq_next = i;
	pr_info("twl4030: %s (irq %d) chaining IRQs %d..%d\n", "PIH",
			irq_num, irq_base, twl4030_irq_next - 1);

	/* ... and the PWR_INT module ... */
	status = twl4030_sih_setup(TWL4030_MODULE_INT);
	if (status < 0) {
		pr_err("twl4030: sih_setup PWR INT --> %d\n", status);
		goto fail;
	}

	/* install an irq handler to demultiplex the TWL4030 interrupt */
	task = start_twl4030_irq_thread(irq_num);
	if (!task) {
		pr_err("twl4030: irq thread FAIL\n");
		status = -ESRCH;
		goto fail;
	}

	set_irq_data(irq_num, task);
	set_irq_chained_handler(irq_num, handle_twl4030_pih);

	/* NOTE(review): on success 'status' still holds the positive
	 * irq_base returned by twl4030_sih_setup(), not 0.
	 */
	return status;

fail:
	for (i = irq_base; i < irq_end; i++)
		set_irq_chip_and_handler(i, NULL, NULL);
	destroy_workqueue(wq);
	wq = NULL;
	return status;
}
734
/*
 * twl_exit_irq() - counterpart to twl_init_irq(); not implemented yet.
 * Once the IRQ chain has been set up (twl4030_irq_base != 0) there is
 * no teardown path, so refuse with -ENOSYS rather than leave dangling
 * handlers behind.  Returns 0 if nothing was ever initialized.
 */
int twl_exit_irq(void)
{
	/* FIXME undo twl_init_irq() */
	if (twl4030_irq_base) {
		pr_err("twl4030: can't yet clean up IRQs?\n");
		return -ENOSYS;
	}
	return 0;
}
diff --git a/drivers/mfd/wm8350-core.c b/drivers/mfd/wm8350-core.c
index bf87f675e7fa..0d47fb9e4b3b 100644
--- a/drivers/mfd/wm8350-core.c
+++ b/drivers/mfd/wm8350-core.c
@@ -183,6 +183,9 @@ static int wm8350_write(struct wm8350 *wm8350, u8 reg, int num_regs, u16 *src)
183 (wm8350->reg_cache[i] & ~wm8350_reg_io_map[i].writable) 183 (wm8350->reg_cache[i] & ~wm8350_reg_io_map[i].writable)
184 | src[i - reg]; 184 | src[i - reg];
185 185
186 /* Don't store volatile bits */
187 wm8350->reg_cache[i] &= ~wm8350_reg_io_map[i].vol;
188
186 src[i - reg] = cpu_to_be16(src[i - reg]); 189 src[i - reg] = cpu_to_be16(src[i - reg]);
187 } 190 }
188 191
@@ -1120,6 +1123,7 @@ static int wm8350_create_cache(struct wm8350 *wm8350, int mode)
1120 } 1123 }
1121 value = be16_to_cpu(value); 1124 value = be16_to_cpu(value);
1122 value &= wm8350_reg_io_map[i].readable; 1125 value &= wm8350_reg_io_map[i].readable;
1126 value &= ~wm8350_reg_io_map[i].vol;
1123 wm8350->reg_cache[i] = value; 1127 wm8350->reg_cache[i] = value;
1124 } else 1128 } else
1125 wm8350->reg_cache[i] = reg_map[i]; 1129 wm8350->reg_cache[i] = reg_map[i];
@@ -1128,7 +1132,6 @@ static int wm8350_create_cache(struct wm8350 *wm8350, int mode)
1128out: 1132out:
1129 return ret; 1133 return ret;
1130} 1134}
1131EXPORT_SYMBOL_GPL(wm8350_create_cache);
1132 1135
1133/* 1136/*
1134 * Register a client device. This is non-fatal since there is no need to 1137 * Register a client device. This is non-fatal since there is no need to
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index ad301ace6085..0b71ebc074b6 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -464,6 +464,12 @@ config MIPS_JAZZ_SONIC
464 This is the driver for the onboard card of MIPS Magnum 4000, 464 This is the driver for the onboard card of MIPS Magnum 4000,
465 Acer PICA, Olivetti M700-10 and a few other identical OEM systems. 465 Acer PICA, Olivetti M700-10 and a few other identical OEM systems.
466 466
467config XTENSA_XT2000_SONIC
468 tristate "Xtensa XT2000 onboard SONIC Ethernet support"
469 depends on XTENSA_PLATFORM_XT2000
470 help
471 This is the driver for the onboard card of the Xtensa XT2000 board.
472
467config MIPS_AU1X00_ENET 473config MIPS_AU1X00_ENET
468 bool "MIPS AU1000 Ethernet support" 474 bool "MIPS AU1000 Ethernet support"
469 depends on SOC_AU1X00 475 depends on SOC_AU1X00
@@ -2504,6 +2510,15 @@ config PASEMI_MAC
2504 This driver supports the on-chip 1/10Gbit Ethernet controller on 2510 This driver supports the on-chip 1/10Gbit Ethernet controller on
2505 PA Semi's PWRficient line of chips. 2511 PA Semi's PWRficient line of chips.
2506 2512
2513config MLX4_EN
2514 tristate "Mellanox Technologies 10Gbit Ethernet support"
2515 depends on PCI && INET
2516 select MLX4_CORE
2517 select INET_LRO
2518 help
2519 This driver supports Mellanox Technologies ConnectX Ethernet
2520 devices.
2521
2507config MLX4_CORE 2522config MLX4_CORE
2508 tristate 2523 tristate
2509 depends on PCI 2524 depends on PCI
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index fa2510b2e609..f19acf8b9220 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -227,6 +227,8 @@ pasemi_mac_driver-objs := pasemi_mac.o pasemi_mac_ethtool.o
227obj-$(CONFIG_MLX4_CORE) += mlx4/ 227obj-$(CONFIG_MLX4_CORE) += mlx4/
228obj-$(CONFIG_ENC28J60) += enc28j60.o 228obj-$(CONFIG_ENC28J60) += enc28j60.o
229 229
230obj-$(CONFIG_XTENSA_XT2000_SONIC) += xtsonic.o
231
230obj-$(CONFIG_MACB) += macb.o 232obj-$(CONFIG_MACB) += macb.o
231 233
232obj-$(CONFIG_ARM) += arm/ 234obj-$(CONFIG_ARM) += arm/
diff --git a/drivers/net/fec_mpc52xx_phy.c b/drivers/net/fec_mpc52xx_phy.c
index 08e18bcb970f..45dd9bdc5d62 100644
--- a/drivers/net/fec_mpc52xx_phy.c
+++ b/drivers/net/fec_mpc52xx_phy.c
@@ -2,6 +2,7 @@
2 * Driver for the MPC5200 Fast Ethernet Controller - MDIO bus driver 2 * Driver for the MPC5200 Fast Ethernet Controller - MDIO bus driver
3 * 3 *
4 * Copyright (C) 2007 Domen Puncer, Telargo, Inc. 4 * Copyright (C) 2007 Domen Puncer, Telargo, Inc.
5 * Copyright (C) 2008 Wolfram Sang, Pengutronix
5 * 6 *
6 * This file is licensed under the terms of the GNU General Public License 7 * This file is licensed under the terms of the GNU General Public License
7 * version 2. This program is licensed "as is" without any warranty of any 8 * version 2. This program is licensed "as is" without any warranty of any
@@ -21,58 +22,45 @@ struct mpc52xx_fec_mdio_priv {
21 struct mpc52xx_fec __iomem *regs; 22 struct mpc52xx_fec __iomem *regs;
22}; 23};
23 24
24static int mpc52xx_fec_mdio_read(struct mii_bus *bus, int phy_id, int reg) 25static int mpc52xx_fec_mdio_transfer(struct mii_bus *bus, int phy_id,
26 int reg, u32 value)
25{ 27{
26 struct mpc52xx_fec_mdio_priv *priv = bus->priv; 28 struct mpc52xx_fec_mdio_priv *priv = bus->priv;
27 struct mpc52xx_fec __iomem *fec; 29 struct mpc52xx_fec __iomem *fec;
28 int tries = 100; 30 int tries = 100;
29 u32 request = FEC_MII_READ_FRAME; 31
32 value |= (phy_id << FEC_MII_DATA_PA_SHIFT) & FEC_MII_DATA_PA_MSK;
33 value |= (reg << FEC_MII_DATA_RA_SHIFT) & FEC_MII_DATA_RA_MSK;
30 34
31 fec = priv->regs; 35 fec = priv->regs;
32 out_be32(&fec->ievent, FEC_IEVENT_MII); 36 out_be32(&fec->ievent, FEC_IEVENT_MII);
33 37 out_be32(&priv->regs->mii_data, value);
34 request |= (phy_id << FEC_MII_DATA_PA_SHIFT) & FEC_MII_DATA_PA_MSK;
35 request |= (reg << FEC_MII_DATA_RA_SHIFT) & FEC_MII_DATA_RA_MSK;
36
37 out_be32(&priv->regs->mii_data, request);
38 38
39 /* wait for it to finish, this takes about 23 us on lite5200b */ 39 /* wait for it to finish, this takes about 23 us on lite5200b */
40 while (!(in_be32(&fec->ievent) & FEC_IEVENT_MII) && --tries) 40 while (!(in_be32(&fec->ievent) & FEC_IEVENT_MII) && --tries)
41 udelay(5); 41 udelay(5);
42 42
43 if (tries == 0) 43 if (!tries)
44 return -ETIMEDOUT; 44 return -ETIMEDOUT;
45 45
46 return in_be32(&priv->regs->mii_data) & FEC_MII_DATA_DATAMSK; 46 return value & FEC_MII_DATA_OP_RD ?
47 in_be32(&priv->regs->mii_data) & FEC_MII_DATA_DATAMSK : 0;
47} 48}
48 49
49static int mpc52xx_fec_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 data) 50static int mpc52xx_fec_mdio_read(struct mii_bus *bus, int phy_id, int reg)
50{ 51{
51 struct mpc52xx_fec_mdio_priv *priv = bus->priv; 52 return mpc52xx_fec_mdio_transfer(bus, phy_id, reg, FEC_MII_READ_FRAME);
52 struct mpc52xx_fec __iomem *fec; 53}
53 u32 value = data;
54 int tries = 100;
55
56 fec = priv->regs;
57 out_be32(&fec->ievent, FEC_IEVENT_MII);
58
59 value |= FEC_MII_WRITE_FRAME;
60 value |= (phy_id << FEC_MII_DATA_PA_SHIFT) & FEC_MII_DATA_PA_MSK;
61 value |= (reg << FEC_MII_DATA_RA_SHIFT) & FEC_MII_DATA_RA_MSK;
62
63 out_be32(&priv->regs->mii_data, value);
64
65 /* wait for request to finish */
66 while (!(in_be32(&fec->ievent) & FEC_IEVENT_MII) && --tries)
67 udelay(5);
68
69 if (tries == 0)
70 return -ETIMEDOUT;
71 54
72 return 0; 55static int mpc52xx_fec_mdio_write(struct mii_bus *bus, int phy_id, int reg,
56 u16 data)
57{
58 return mpc52xx_fec_mdio_transfer(bus, phy_id, reg,
59 data | FEC_MII_WRITE_FRAME);
73} 60}
74 61
75static int mpc52xx_fec_mdio_probe(struct of_device *of, const struct of_device_id *match) 62static int mpc52xx_fec_mdio_probe(struct of_device *of,
63 const struct of_device_id *match)
76{ 64{
77 struct device *dev = &of->dev; 65 struct device *dev = &of->dev;
78 struct device_node *np = of->node; 66 struct device_node *np = of->node;
@@ -131,7 +119,8 @@ static int mpc52xx_fec_mdio_probe(struct of_device *of, const struct of_device_i
131 dev_set_drvdata(dev, bus); 119 dev_set_drvdata(dev, bus);
132 120
133 /* set MII speed */ 121 /* set MII speed */
134 out_be32(&priv->regs->mii_speed, ((mpc52xx_find_ipb_freq(of->node) >> 20) / 5) << 1); 122 out_be32(&priv->regs->mii_speed,
123 ((mpc52xx_find_ipb_freq(of->node) >> 20) / 5) << 1);
135 124
136 /* enable MII interrupt */ 125 /* enable MII interrupt */
137 out_be32(&priv->regs->imask, in_be32(&priv->regs->imask) | FEC_IMASK_MII); 126 out_be32(&priv->regs->imask, in_be32(&priv->regs->imask) | FEC_IMASK_MII);
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index efcf21c9f5c7..2ee2622258f5 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -2604,8 +2604,16 @@ static int __devinit emac_init_config(struct emac_instance *dev)
2604 if (of_device_is_compatible(np, "ibm,emac-440ep") || 2604 if (of_device_is_compatible(np, "ibm,emac-440ep") ||
2605 of_device_is_compatible(np, "ibm,emac-440gr")) 2605 of_device_is_compatible(np, "ibm,emac-440gr"))
2606 dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX; 2606 dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
2607 if (of_device_is_compatible(np, "ibm,emac-405ez")) 2607 if (of_device_is_compatible(np, "ibm,emac-405ez")) {
2608#ifdef CONFIG_IBM_NEW_EMAC_NO_FLOW_CONTROL
2608 dev->features |= EMAC_FTR_NO_FLOW_CONTROL_40x; 2609 dev->features |= EMAC_FTR_NO_FLOW_CONTROL_40x;
2610#else
2611 printk(KERN_ERR "%s: Flow control not disabled!\n",
2612 np->full_name);
2613 return -ENXIO;
2614#endif
2615 }
2616
2609 } 2617 }
2610 2618
2611 /* Fixup some feature bits based on the device tree */ 2619 /* Fixup some feature bits based on the device tree */
diff --git a/drivers/net/ibm_newemac/mal.c b/drivers/net/ibm_newemac/mal.c
index 1839d3f154a3..ecf9798987fa 100644
--- a/drivers/net/ibm_newemac/mal.c
+++ b/drivers/net/ibm_newemac/mal.c
@@ -280,9 +280,11 @@ static irqreturn_t mal_txeob(int irq, void *dev_instance)
280 mal_schedule_poll(mal); 280 mal_schedule_poll(mal);
281 set_mal_dcrn(mal, MAL_TXEOBISR, r); 281 set_mal_dcrn(mal, MAL_TXEOBISR, r);
282 282
283#ifdef CONFIG_PPC_DCR_NATIVE
283 if (mal_has_feature(mal, MAL_FTR_CLEAR_ICINTSTAT)) 284 if (mal_has_feature(mal, MAL_FTR_CLEAR_ICINTSTAT))
284 mtdcri(SDR0, DCRN_SDR_ICINTSTAT, 285 mtdcri(SDR0, DCRN_SDR_ICINTSTAT,
285 (mfdcri(SDR0, DCRN_SDR_ICINTSTAT) | ICINTSTAT_ICTX)); 286 (mfdcri(SDR0, DCRN_SDR_ICINTSTAT) | ICINTSTAT_ICTX));
287#endif
286 288
287 return IRQ_HANDLED; 289 return IRQ_HANDLED;
288} 290}
@@ -298,9 +300,11 @@ static irqreturn_t mal_rxeob(int irq, void *dev_instance)
298 mal_schedule_poll(mal); 300 mal_schedule_poll(mal);
299 set_mal_dcrn(mal, MAL_RXEOBISR, r); 301 set_mal_dcrn(mal, MAL_RXEOBISR, r);
300 302
303#ifdef CONFIG_PPC_DCR_NATIVE
301 if (mal_has_feature(mal, MAL_FTR_CLEAR_ICINTSTAT)) 304 if (mal_has_feature(mal, MAL_FTR_CLEAR_ICINTSTAT))
302 mtdcri(SDR0, DCRN_SDR_ICINTSTAT, 305 mtdcri(SDR0, DCRN_SDR_ICINTSTAT,
303 (mfdcri(SDR0, DCRN_SDR_ICINTSTAT) | ICINTSTAT_ICRX)); 306 (mfdcri(SDR0, DCRN_SDR_ICINTSTAT) | ICINTSTAT_ICRX));
307#endif
304 308
305 return IRQ_HANDLED; 309 return IRQ_HANDLED;
306} 310}
@@ -572,9 +576,18 @@ static int __devinit mal_probe(struct of_device *ofdev,
572 goto fail; 576 goto fail;
573 } 577 }
574 578
575 if (of_device_is_compatible(ofdev->node, "ibm,mcmal-405ez")) 579 if (of_device_is_compatible(ofdev->node, "ibm,mcmal-405ez")) {
580#if defined(CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT) && \
581 defined(CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR)
576 mal->features |= (MAL_FTR_CLEAR_ICINTSTAT | 582 mal->features |= (MAL_FTR_CLEAR_ICINTSTAT |
577 MAL_FTR_COMMON_ERR_INT); 583 MAL_FTR_COMMON_ERR_INT);
584#else
585 printk(KERN_ERR "%s: Support for 405EZ not enabled!\n",
586 ofdev->node->full_name);
587 err = -ENODEV;
588 goto fail;
589#endif
590 }
578 591
579 mal->txeob_irq = irq_of_parse_and_map(ofdev->node, 0); 592 mal->txeob_irq = irq_of_parse_and_map(ofdev->node, 0);
580 mal->rxeob_irq = irq_of_parse_and_map(ofdev->node, 1); 593 mal->rxeob_irq = irq_of_parse_and_map(ofdev->node, 1);
diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile
index 0952a6528f58..a7a97bf998f8 100644
--- a/drivers/net/mlx4/Makefile
+++ b/drivers/net/mlx4/Makefile
@@ -1,4 +1,9 @@
1obj-$(CONFIG_MLX4_CORE) += mlx4_core.o 1obj-$(CONFIG_MLX4_CORE) += mlx4_core.o
2 2
3mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \ 3mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
4 mr.o pd.o profile.o qp.o reset.o srq.o 4 mr.o pd.o port.o profile.o qp.o reset.o srq.o
5
6obj-$(CONFIG_MLX4_EN) += mlx4_en.o
7
8mlx4_en-y := en_main.o en_tx.o en_rx.o en_params.o en_port.o en_cq.o \
9 en_resources.o en_netdev.o
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c
index b411b79d72ad..ad95d5f7b630 100644
--- a/drivers/net/mlx4/alloc.c
+++ b/drivers/net/mlx4/alloc.c
@@ -48,13 +48,16 @@ u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap)
48 48
49 obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->last); 49 obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->last);
50 if (obj >= bitmap->max) { 50 if (obj >= bitmap->max) {
51 bitmap->top = (bitmap->top + bitmap->max) & bitmap->mask; 51 bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
52 & bitmap->mask;
52 obj = find_first_zero_bit(bitmap->table, bitmap->max); 53 obj = find_first_zero_bit(bitmap->table, bitmap->max);
53 } 54 }
54 55
55 if (obj < bitmap->max) { 56 if (obj < bitmap->max) {
56 set_bit(obj, bitmap->table); 57 set_bit(obj, bitmap->table);
57 bitmap->last = (obj + 1) & (bitmap->max - 1); 58 bitmap->last = (obj + 1);
59 if (bitmap->last == bitmap->max)
60 bitmap->last = 0;
58 obj |= bitmap->top; 61 obj |= bitmap->top;
59 } else 62 } else
60 obj = -1; 63 obj = -1;
@@ -66,16 +69,90 @@ u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap)
66 69
67void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj) 70void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj)
68{ 71{
69 obj &= bitmap->max - 1; 72 mlx4_bitmap_free_range(bitmap, obj, 1);
73}
74
75static unsigned long find_aligned_range(unsigned long *bitmap,
76 u32 start, u32 nbits,
77 int len, int align)
78{
79 unsigned long end, i;
80
81again:
82 start = ALIGN(start, align);
83
84 while ((start < nbits) && test_bit(start, bitmap))
85 start += align;
86
87 if (start >= nbits)
88 return -1;
89
90 end = start+len;
91 if (end > nbits)
92 return -1;
93
94 for (i = start + 1; i < end; i++) {
95 if (test_bit(i, bitmap)) {
96 start = i + 1;
97 goto again;
98 }
99 }
100
101 return start;
102}
103
104u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align)
105{
106 u32 obj, i;
107
108 if (likely(cnt == 1 && align == 1))
109 return mlx4_bitmap_alloc(bitmap);
110
111 spin_lock(&bitmap->lock);
112
113 obj = find_aligned_range(bitmap->table, bitmap->last,
114 bitmap->max, cnt, align);
115 if (obj >= bitmap->max) {
116 bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
117 & bitmap->mask;
118 obj = find_aligned_range(bitmap->table, 0, bitmap->max,
119 cnt, align);
120 }
121
122 if (obj < bitmap->max) {
123 for (i = 0; i < cnt; i++)
124 set_bit(obj + i, bitmap->table);
125 if (obj == bitmap->last) {
126 bitmap->last = (obj + cnt);
127 if (bitmap->last >= bitmap->max)
128 bitmap->last = 0;
129 }
130 obj |= bitmap->top;
131 } else
132 obj = -1;
133
134 spin_unlock(&bitmap->lock);
135
136 return obj;
137}
138
139void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt)
140{
141 u32 i;
142
143 obj &= bitmap->max + bitmap->reserved_top - 1;
70 144
71 spin_lock(&bitmap->lock); 145 spin_lock(&bitmap->lock);
72 clear_bit(obj, bitmap->table); 146 for (i = 0; i < cnt; i++)
147 clear_bit(obj + i, bitmap->table);
73 bitmap->last = min(bitmap->last, obj); 148 bitmap->last = min(bitmap->last, obj);
74 bitmap->top = (bitmap->top + bitmap->max) & bitmap->mask; 149 bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
150 & bitmap->mask;
75 spin_unlock(&bitmap->lock); 151 spin_unlock(&bitmap->lock);
76} 152}
77 153
78int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, u32 reserved) 154int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
155 u32 reserved_bot, u32 reserved_top)
79{ 156{
80 int i; 157 int i;
81 158
@@ -85,14 +162,16 @@ int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, u32 reserved
85 162
86 bitmap->last = 0; 163 bitmap->last = 0;
87 bitmap->top = 0; 164 bitmap->top = 0;
88 bitmap->max = num; 165 bitmap->max = num - reserved_top;
89 bitmap->mask = mask; 166 bitmap->mask = mask;
167 bitmap->reserved_top = reserved_top;
90 spin_lock_init(&bitmap->lock); 168 spin_lock_init(&bitmap->lock);
91 bitmap->table = kzalloc(BITS_TO_LONGS(num) * sizeof (long), GFP_KERNEL); 169 bitmap->table = kzalloc(BITS_TO_LONGS(bitmap->max) *
170 sizeof (long), GFP_KERNEL);
92 if (!bitmap->table) 171 if (!bitmap->table)
93 return -ENOMEM; 172 return -ENOMEM;
94 173
95 for (i = 0; i < reserved; ++i) 174 for (i = 0; i < reserved_bot; ++i)
96 set_bit(i, bitmap->table); 175 set_bit(i, bitmap->table);
97 176
98 return 0; 177 return 0;
diff --git a/drivers/net/mlx4/cq.c b/drivers/net/mlx4/cq.c
index 9bb50e3f8974..b7ad2829d67e 100644
--- a/drivers/net/mlx4/cq.c
+++ b/drivers/net/mlx4/cq.c
@@ -300,7 +300,7 @@ int mlx4_init_cq_table(struct mlx4_dev *dev)
300 INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC); 300 INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);
301 301
302 err = mlx4_bitmap_init(&cq_table->bitmap, dev->caps.num_cqs, 302 err = mlx4_bitmap_init(&cq_table->bitmap, dev->caps.num_cqs,
303 dev->caps.num_cqs - 1, dev->caps.reserved_cqs); 303 dev->caps.num_cqs - 1, dev->caps.reserved_cqs, 0);
304 if (err) 304 if (err)
305 return err; 305 return err;
306 306
diff --git a/drivers/net/mlx4/en_cq.c b/drivers/net/mlx4/en_cq.c
new file mode 100644
index 000000000000..1368a8010af4
--- /dev/null
+++ b/drivers/net/mlx4/en_cq.c
@@ -0,0 +1,146 @@
1/*
2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#include <linux/mlx4/cq.h>
35#include <linux/mlx4/qp.h>
36#include <linux/mlx4/cmd.h>
37
38#include "mlx4_en.h"
39
40static void mlx4_en_cq_event(struct mlx4_cq *cq, enum mlx4_event event)
41{
42 return;
43}
44
45
46int mlx4_en_create_cq(struct mlx4_en_priv *priv,
47 struct mlx4_en_cq *cq,
48 int entries, int ring, enum cq_type mode)
49{
50 struct mlx4_en_dev *mdev = priv->mdev;
51 int err;
52
53 cq->size = entries;
54 if (mode == RX)
55 cq->buf_size = cq->size * sizeof(struct mlx4_cqe);
56 else
57 cq->buf_size = sizeof(struct mlx4_cqe);
58
59 cq->ring = ring;
60 cq->is_tx = mode;
61 spin_lock_init(&cq->lock);
62
63 err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres,
64 cq->buf_size, 2 * PAGE_SIZE);
65 if (err)
66 return err;
67
68 err = mlx4_en_map_buffer(&cq->wqres.buf);
69 if (err)
70 mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
71
72 return err;
73}
74
75int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
76{
77 struct mlx4_en_dev *mdev = priv->mdev;
78 int err;
79
80 cq->dev = mdev->pndev[priv->port];
81 cq->mcq.set_ci_db = cq->wqres.db.db;
82 cq->mcq.arm_db = cq->wqres.db.db + 1;
83 *cq->mcq.set_ci_db = 0;
84 *cq->mcq.arm_db = 0;
85 cq->buf = (struct mlx4_cqe *) cq->wqres.buf.direct.buf;
86 memset(cq->buf, 0, cq->buf_size);
87
88 err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt, &mdev->priv_uar,
89 cq->wqres.db.dma, &cq->mcq, cq->is_tx);
90 if (err)
91 return err;
92
93 cq->mcq.comp = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq;
94 cq->mcq.event = mlx4_en_cq_event;
95
96 if (cq->is_tx) {
97 init_timer(&cq->timer);
98 cq->timer.function = mlx4_en_poll_tx_cq;
99 cq->timer.data = (unsigned long) cq;
100 } else {
101 netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
102 napi_enable(&cq->napi);
103 }
104
105 return 0;
106}
107
108void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
109{
110 struct mlx4_en_dev *mdev = priv->mdev;
111
112 mlx4_en_unmap_buffer(&cq->wqres.buf);
113 mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
114 cq->buf_size = 0;
115 cq->buf = NULL;
116}
117
118void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
119{
120 struct mlx4_en_dev *mdev = priv->mdev;
121
122 if (cq->is_tx)
123 del_timer(&cq->timer);
124 else
125 napi_disable(&cq->napi);
126
127 mlx4_cq_free(mdev->dev, &cq->mcq);
128}
129
130/* Set rx cq moderation parameters */
131int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
132{
133 return mlx4_cq_modify(priv->mdev->dev, &cq->mcq,
134 cq->moder_cnt, cq->moder_time);
135}
136
137int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
138{
139 cq->armed = 1;
140 mlx4_cq_arm(&cq->mcq, MLX4_CQ_DB_REQ_NOT, priv->mdev->uar_map,
141 &priv->mdev->uar_lock);
142
143 return 0;
144}
145
146
diff --git a/drivers/net/mlx4/en_main.c b/drivers/net/mlx4/en_main.c
new file mode 100644
index 000000000000..1b0eebf84f76
--- /dev/null
+++ b/drivers/net/mlx4/en_main.c
@@ -0,0 +1,254 @@
1/*
2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#include <linux/cpumask.h>
35#include <linux/module.h>
36#include <linux/delay.h>
37#include <linux/netdevice.h>
38#include <linux/cpumask.h>
39
40#include <linux/mlx4/driver.h>
41#include <linux/mlx4/device.h>
42#include <linux/mlx4/cmd.h>
43
44#include "mlx4_en.h"
45
46MODULE_AUTHOR("Liran Liss, Yevgeny Petrilin");
47MODULE_DESCRIPTION("Mellanox ConnectX HCA Ethernet driver");
48MODULE_LICENSE("Dual BSD/GPL");
49MODULE_VERSION(DRV_VERSION " ("DRV_RELDATE")");
50
51static const char mlx4_en_version[] =
52 DRV_NAME ": Mellanox ConnectX HCA Ethernet driver v"
53 DRV_VERSION " (" DRV_RELDATE ")\n";
54
55static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr,
56 enum mlx4_dev_event event, int port)
57{
58 struct mlx4_en_dev *mdev = (struct mlx4_en_dev *) endev_ptr;
59 struct mlx4_en_priv *priv;
60
61 if (!mdev->pndev[port])
62 return;
63
64 priv = netdev_priv(mdev->pndev[port]);
65 switch (event) {
66 case MLX4_DEV_EVENT_PORT_UP:
67 case MLX4_DEV_EVENT_PORT_DOWN:
68 /* To prevent races, we poll the link state in a separate
69 task rather than changing it here */
70 priv->link_state = event;
71 queue_work(mdev->workqueue, &priv->linkstate_task);
72 break;
73
74 case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
75 mlx4_err(mdev, "Internal error detected, restarting device\n");
76 break;
77
78 default:
79 mlx4_warn(mdev, "Unhandled event: %d\n", event);
80 }
81}
82
83static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr)
84{
85 struct mlx4_en_dev *mdev = endev_ptr;
86 int i;
87
88 mutex_lock(&mdev->state_lock);
89 mdev->device_up = false;
90 mutex_unlock(&mdev->state_lock);
91
92 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
93 if (mdev->pndev[i])
94 mlx4_en_destroy_netdev(mdev->pndev[i]);
95
96 flush_workqueue(mdev->workqueue);
97 destroy_workqueue(mdev->workqueue);
98 mlx4_mr_free(dev, &mdev->mr);
99 mlx4_uar_free(dev, &mdev->priv_uar);
100 mlx4_pd_free(dev, mdev->priv_pdn);
101 kfree(mdev);
102}
103
104static void *mlx4_en_add(struct mlx4_dev *dev)
105{
106 static int mlx4_en_version_printed;
107 struct mlx4_en_dev *mdev;
108 int i;
109 int err;
110
111 if (!mlx4_en_version_printed) {
112 printk(KERN_INFO "%s", mlx4_en_version);
113 mlx4_en_version_printed++;
114 }
115
116 mdev = kzalloc(sizeof *mdev, GFP_KERNEL);
117 if (!mdev) {
118 dev_err(&dev->pdev->dev, "Device struct alloc failed, "
119 "aborting.\n");
120 err = -ENOMEM;
121 goto err_free_res;
122 }
123
124 if (mlx4_pd_alloc(dev, &mdev->priv_pdn))
125 goto err_free_dev;
126
127 if (mlx4_uar_alloc(dev, &mdev->priv_uar))
128 goto err_pd;
129
130 mdev->uar_map = ioremap(mdev->priv_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
131 if (!mdev->uar_map)
132 goto err_uar;
133 spin_lock_init(&mdev->uar_lock);
134
135 mdev->dev = dev;
136 mdev->dma_device = &(dev->pdev->dev);
137 mdev->pdev = dev->pdev;
138 mdev->device_up = false;
139
140 mdev->LSO_support = !!(dev->caps.flags & (1 << 15));
141 if (!mdev->LSO_support)
142 mlx4_warn(mdev, "LSO not supported, please upgrade to later "
143 "FW version to enable LSO\n");
144
145 if (mlx4_mr_alloc(mdev->dev, mdev->priv_pdn, 0, ~0ull,
146 MLX4_PERM_LOCAL_WRITE | MLX4_PERM_LOCAL_READ,
147 0, 0, &mdev->mr)) {
148 mlx4_err(mdev, "Failed allocating memory region\n");
149 goto err_uar;
150 }
151 if (mlx4_mr_enable(mdev->dev, &mdev->mr)) {
152 mlx4_err(mdev, "Failed enabling memory region\n");
153 goto err_mr;
154 }
155
156 /* Build device profile according to supplied module parameters */
157 err = mlx4_en_get_profile(mdev);
158 if (err) {
159 mlx4_err(mdev, "Bad module parameters, aborting.\n");
160 goto err_mr;
161 }
162
163 /* Configure wich ports to start according to module parameters */
164 mdev->port_cnt = 0;
165 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
166 mdev->port_cnt++;
167
168 /* If we did not receive an explicit number of Rx rings, default to
169 * the number of completion vectors populated by the mlx4_core */
170 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
171 mlx4_info(mdev, "Using %d tx rings for port:%d\n",
172 mdev->profile.prof[i].tx_ring_num, i);
173 if (!mdev->profile.prof[i].rx_ring_num) {
174 mdev->profile.prof[i].rx_ring_num = 1;
175 mlx4_info(mdev, "Defaulting to %d rx rings for port:%d\n",
176 1, i);
177 } else
178 mlx4_info(mdev, "Using %d rx rings for port:%d\n",
179 mdev->profile.prof[i].rx_ring_num, i);
180 }
181
182 /* Create our own workqueue for reset/multicast tasks
183 * Note: we cannot use the shared workqueue because of deadlocks caused
184 * by the rtnl lock */
185 mdev->workqueue = create_singlethread_workqueue("mlx4_en");
186 if (!mdev->workqueue) {
187 err = -ENOMEM;
188 goto err_close_nic;
189 }
190
191 /* At this stage all non-port specific tasks are complete:
192 * mark the card state as up */
193 mutex_init(&mdev->state_lock);
194 mdev->device_up = true;
195
196 /* Setup ports */
197
198 /* Create a netdev for each port */
199 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
200 mlx4_info(mdev, "Activating port:%d\n", i);
201 if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i])) {
202 mdev->pndev[i] = NULL;
203 goto err_free_netdev;
204 }
205 }
206 return mdev;
207
208
209err_free_netdev:
210 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
211 if (mdev->pndev[i])
212 mlx4_en_destroy_netdev(mdev->pndev[i]);
213 }
214
215 mutex_lock(&mdev->state_lock);
216 mdev->device_up = false;
217 mutex_unlock(&mdev->state_lock);
218 flush_workqueue(mdev->workqueue);
219
220 /* Stop event queue before we drop down to release shared SW state */
221
222err_close_nic:
223 destroy_workqueue(mdev->workqueue);
224err_mr:
225 mlx4_mr_free(dev, &mdev->mr);
226err_uar:
227 mlx4_uar_free(dev, &mdev->priv_uar);
228err_pd:
229 mlx4_pd_free(dev, mdev->priv_pdn);
230err_free_dev:
231 kfree(mdev);
232err_free_res:
233 return NULL;
234}
235
236static struct mlx4_interface mlx4_en_interface = {
237 .add = mlx4_en_add,
238 .remove = mlx4_en_remove,
239 .event = mlx4_en_event,
240};
241
242static int __init mlx4_en_init(void)
243{
244 return mlx4_register_interface(&mlx4_en_interface);
245}
246
247static void __exit mlx4_en_cleanup(void)
248{
249 mlx4_unregister_interface(&mlx4_en_interface);
250}
251
252module_init(mlx4_en_init);
253module_exit(mlx4_en_cleanup);
254
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
new file mode 100644
index 000000000000..a339afbeed38
--- /dev/null
+++ b/drivers/net/mlx4/en_netdev.c
@@ -0,0 +1,1088 @@
1/*
2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#include <linux/etherdevice.h>
35#include <linux/tcp.h>
36#include <linux/if_vlan.h>
37#include <linux/delay.h>
38
39#include <linux/mlx4/driver.h>
40#include <linux/mlx4/device.h>
41#include <linux/mlx4/cmd.h>
42#include <linux/mlx4/cq.h>
43
44#include "mlx4_en.h"
45#include "en_port.h"
46
47
48static void mlx4_en_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
49{
50 struct mlx4_en_priv *priv = netdev_priv(dev);
51 struct mlx4_en_dev *mdev = priv->mdev;
52 int err;
53
54 mlx4_dbg(HW, priv, "Registering VLAN group:%p\n", grp);
55 priv->vlgrp = grp;
56
57 mutex_lock(&mdev->state_lock);
58 if (mdev->device_up && priv->port_up) {
59 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, grp);
60 if (err)
61 mlx4_err(mdev, "Failed configuring VLAN filter\n");
62 }
63 mutex_unlock(&mdev->state_lock);
64}
65
66static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
67{
68 struct mlx4_en_priv *priv = netdev_priv(dev);
69 struct mlx4_en_dev *mdev = priv->mdev;
70 int err;
71
72 if (!priv->vlgrp)
73 return;
74
75 mlx4_dbg(HW, priv, "adding VLAN:%d (vlgrp entry:%p)\n",
76 vid, vlan_group_get_device(priv->vlgrp, vid));
77
78 /* Add VID to port VLAN filter */
79 mutex_lock(&mdev->state_lock);
80 if (mdev->device_up && priv->port_up) {
81 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
82 if (err)
83 mlx4_err(mdev, "Failed configuring VLAN filter\n");
84 }
85 mutex_unlock(&mdev->state_lock);
86}
87
88static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
89{
90 struct mlx4_en_priv *priv = netdev_priv(dev);
91 struct mlx4_en_dev *mdev = priv->mdev;
92 int err;
93
94 if (!priv->vlgrp)
95 return;
96
97 mlx4_dbg(HW, priv, "Killing VID:%d (vlgrp:%p vlgrp "
98 "entry:%p)\n", vid, priv->vlgrp,
99 vlan_group_get_device(priv->vlgrp, vid));
100 vlan_group_set_device(priv->vlgrp, vid, NULL);
101
102 /* Remove VID from port VLAN filter */
103 mutex_lock(&mdev->state_lock);
104 if (mdev->device_up && priv->port_up) {
105 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
106 if (err)
107 mlx4_err(mdev, "Failed configuring VLAN filter\n");
108 }
109 mutex_unlock(&mdev->state_lock);
110}
111
112static u64 mlx4_en_mac_to_u64(u8 *addr)
113{
114 u64 mac = 0;
115 int i;
116
117 for (i = 0; i < ETH_ALEN; i++) {
118 mac <<= 8;
119 mac |= addr[i];
120 }
121 return mac;
122}
123
124static int mlx4_en_set_mac(struct net_device *dev, void *addr)
125{
126 struct mlx4_en_priv *priv = netdev_priv(dev);
127 struct mlx4_en_dev *mdev = priv->mdev;
128 struct sockaddr *saddr = addr;
129
130 if (!is_valid_ether_addr(saddr->sa_data))
131 return -EADDRNOTAVAIL;
132
133 memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
134 priv->mac = mlx4_en_mac_to_u64(dev->dev_addr);
135 queue_work(mdev->workqueue, &priv->mac_task);
136 return 0;
137}
138
139static void mlx4_en_do_set_mac(struct work_struct *work)
140{
141 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
142 mac_task);
143 struct mlx4_en_dev *mdev = priv->mdev;
144 int err = 0;
145
146 mutex_lock(&mdev->state_lock);
147 if (priv->port_up) {
148 /* Remove old MAC and insert the new one */
149 mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index);
150 err = mlx4_register_mac(mdev->dev, priv->port,
151 priv->mac, &priv->mac_index);
152 if (err)
153 mlx4_err(mdev, "Failed changing HW MAC address\n");
154 } else
155 mlx4_dbg(HW, priv, "Port is down, exiting...\n");
156
157 mutex_unlock(&mdev->state_lock);
158}
159
160static void mlx4_en_clear_list(struct net_device *dev)
161{
162 struct mlx4_en_priv *priv = netdev_priv(dev);
163 struct dev_mc_list *plist = priv->mc_list;
164 struct dev_mc_list *next;
165
166 while (plist) {
167 next = plist->next;
168 kfree(plist);
169 plist = next;
170 }
171 priv->mc_list = NULL;
172}
173
174static void mlx4_en_cache_mclist(struct net_device *dev)
175{
176 struct mlx4_en_priv *priv = netdev_priv(dev);
177 struct mlx4_en_dev *mdev = priv->mdev;
178 struct dev_mc_list *mclist;
179 struct dev_mc_list *tmp;
180 struct dev_mc_list *plist = NULL;
181
182 for (mclist = dev->mc_list; mclist; mclist = mclist->next) {
183 tmp = kmalloc(sizeof(struct dev_mc_list), GFP_ATOMIC);
184 if (!tmp) {
185 mlx4_err(mdev, "failed to allocate multicast list\n");
186 mlx4_en_clear_list(dev);
187 return;
188 }
189 memcpy(tmp, mclist, sizeof(struct dev_mc_list));
190 tmp->next = NULL;
191 if (plist)
192 plist->next = tmp;
193 else
194 priv->mc_list = tmp;
195 plist = tmp;
196 }
197}
198
199
200static void mlx4_en_set_multicast(struct net_device *dev)
201{
202 struct mlx4_en_priv *priv = netdev_priv(dev);
203
204 if (!priv->port_up)
205 return;
206
207 queue_work(priv->mdev->workqueue, &priv->mcast_task);
208}
209
210static void mlx4_en_do_set_multicast(struct work_struct *work)
211{
212 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
213 mcast_task);
214 struct mlx4_en_dev *mdev = priv->mdev;
215 struct net_device *dev = priv->dev;
216 struct dev_mc_list *mclist;
217 u64 mcast_addr = 0;
218 int err;
219
220 mutex_lock(&mdev->state_lock);
221 if (!mdev->device_up) {
222 mlx4_dbg(HW, priv, "Card is not up, ignoring "
223 "multicast change.\n");
224 goto out;
225 }
226 if (!priv->port_up) {
227 mlx4_dbg(HW, priv, "Port is down, ignoring "
228 "multicast change.\n");
229 goto out;
230 }
231
232 /*
233 * Promsicuous mode: disable all filters
234 */
235
236 if (dev->flags & IFF_PROMISC) {
237 if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
238 if (netif_msg_rx_status(priv))
239 mlx4_warn(mdev, "Port:%d entering promiscuous mode\n",
240 priv->port);
241 priv->flags |= MLX4_EN_FLAG_PROMISC;
242
243 /* Enable promiscouos mode */
244 err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
245 priv->base_qpn, 1);
246 if (err)
247 mlx4_err(mdev, "Failed enabling "
248 "promiscous mode\n");
249
250 /* Disable port multicast filter (unconditionally) */
251 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
252 0, MLX4_MCAST_DISABLE);
253 if (err)
254 mlx4_err(mdev, "Failed disabling "
255 "multicast filter\n");
256
257 /* Disable port VLAN filter */
258 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, NULL);
259 if (err)
260 mlx4_err(mdev, "Failed disabling "
261 "VLAN filter\n");
262 }
263 goto out;
264 }
265
266 /*
267 * Not in promiscous mode
268 */
269
270 if (priv->flags & MLX4_EN_FLAG_PROMISC) {
271 if (netif_msg_rx_status(priv))
272 mlx4_warn(mdev, "Port:%d leaving promiscuous mode\n",
273 priv->port);
274 priv->flags &= ~MLX4_EN_FLAG_PROMISC;
275
276 /* Disable promiscouos mode */
277 err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
278 priv->base_qpn, 0);
279 if (err)
280 mlx4_err(mdev, "Failed disabling promiscous mode\n");
281
282 /* Enable port VLAN filter */
283 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
284 if (err)
285 mlx4_err(mdev, "Failed enabling VLAN filter\n");
286 }
287
288 /* Enable/disable the multicast filter according to IFF_ALLMULTI */
289 if (dev->flags & IFF_ALLMULTI) {
290 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
291 0, MLX4_MCAST_DISABLE);
292 if (err)
293 mlx4_err(mdev, "Failed disabling multicast filter\n");
294 } else {
295 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
296 0, MLX4_MCAST_DISABLE);
297 if (err)
298 mlx4_err(mdev, "Failed disabling multicast filter\n");
299
300 /* Flush mcast filter and init it with broadcast address */
301 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
302 1, MLX4_MCAST_CONFIG);
303
304 /* Update multicast list - we cache all addresses so they won't
305 * change while HW is updated holding the command semaphor */
306 netif_tx_lock_bh(dev);
307 mlx4_en_cache_mclist(dev);
308 netif_tx_unlock_bh(dev);
309 for (mclist = priv->mc_list; mclist; mclist = mclist->next) {
310 mcast_addr = mlx4_en_mac_to_u64(mclist->dmi_addr);
311 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
312 mcast_addr, 0, MLX4_MCAST_CONFIG);
313 }
314 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
315 0, MLX4_MCAST_ENABLE);
316 if (err)
317 mlx4_err(mdev, "Failed enabling multicast filter\n");
318
319 mlx4_en_clear_list(dev);
320 }
321out:
322 mutex_unlock(&mdev->state_lock);
323}
324
325#ifdef CONFIG_NET_POLL_CONTROLLER
326static void mlx4_en_netpoll(struct net_device *dev)
327{
328 struct mlx4_en_priv *priv = netdev_priv(dev);
329 struct mlx4_en_cq *cq;
330 unsigned long flags;
331 int i;
332
333 for (i = 0; i < priv->rx_ring_num; i++) {
334 cq = &priv->rx_cq[i];
335 spin_lock_irqsave(&cq->lock, flags);
336 napi_synchronize(&cq->napi);
337 mlx4_en_process_rx_cq(dev, cq, 0);
338 spin_unlock_irqrestore(&cq->lock, flags);
339 }
340}
341#endif
342
343static void mlx4_en_tx_timeout(struct net_device *dev)
344{
345 struct mlx4_en_priv *priv = netdev_priv(dev);
346 struct mlx4_en_dev *mdev = priv->mdev;
347
348 if (netif_msg_timer(priv))
349 mlx4_warn(mdev, "Tx timeout called on port:%d\n", priv->port);
350
351 if (netif_carrier_ok(dev)) {
352 priv->port_stats.tx_timeout++;
353 mlx4_dbg(DRV, priv, "Scheduling watchdog\n");
354 queue_work(mdev->workqueue, &priv->watchdog_task);
355 }
356}
357
358
359static struct net_device_stats *mlx4_en_get_stats(struct net_device *dev)
360{
361 struct mlx4_en_priv *priv = netdev_priv(dev);
362
363 spin_lock_bh(&priv->stats_lock);
364 memcpy(&priv->ret_stats, &priv->stats, sizeof(priv->stats));
365 spin_unlock_bh(&priv->stats_lock);
366
367 return &priv->ret_stats;
368}
369
370static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
371{
372 struct mlx4_en_dev *mdev = priv->mdev;
373 struct mlx4_en_cq *cq;
374 int i;
375
376 /* If we haven't received a specific coalescing setting
377 * (module param), we set the moderation paramters as follows:
378 * - moder_cnt is set to the number of mtu sized packets to
379 * satisfy our coelsing target.
380 * - moder_time is set to a fixed value.
381 */
382 priv->rx_frames = (mdev->profile.rx_moder_cnt ==
383 MLX4_EN_AUTO_CONF) ?
384 MLX4_EN_RX_COAL_TARGET /
385 priv->dev->mtu + 1 :
386 mdev->profile.rx_moder_cnt;
387 priv->rx_usecs = (mdev->profile.rx_moder_time ==
388 MLX4_EN_AUTO_CONF) ?
389 MLX4_EN_RX_COAL_TIME :
390 mdev->profile.rx_moder_time;
391 mlx4_dbg(INTR, priv, "Default coalesing params for mtu:%d - "
392 "rx_frames:%d rx_usecs:%d\n",
393 priv->dev->mtu, priv->rx_frames, priv->rx_usecs);
394
395 /* Setup cq moderation params */
396 for (i = 0; i < priv->rx_ring_num; i++) {
397 cq = &priv->rx_cq[i];
398 cq->moder_cnt = priv->rx_frames;
399 cq->moder_time = priv->rx_usecs;
400 }
401
402 for (i = 0; i < priv->tx_ring_num; i++) {
403 cq = &priv->tx_cq[i];
404 cq->moder_cnt = MLX4_EN_TX_COAL_PKTS;
405 cq->moder_time = MLX4_EN_TX_COAL_TIME;
406 }
407
408 /* Reset auto-moderation params */
409 priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW;
410 priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW;
411 priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH;
412 priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
413 priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
414 priv->adaptive_rx_coal = mdev->profile.auto_moder;
415 priv->last_moder_time = MLX4_EN_AUTO_CONF;
416 priv->last_moder_jiffies = 0;
417 priv->last_moder_packets = 0;
418 priv->last_moder_tx_packets = 0;
419 priv->last_moder_bytes = 0;
420}
421
422static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
423{
424 unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
425 struct mlx4_en_dev *mdev = priv->mdev;
426 struct mlx4_en_cq *cq;
427 unsigned long packets;
428 unsigned long rate;
429 unsigned long avg_pkt_size;
430 unsigned long rx_packets;
431 unsigned long rx_bytes;
432 unsigned long tx_packets;
433 unsigned long tx_pkt_diff;
434 unsigned long rx_pkt_diff;
435 int moder_time;
436 int i, err;
437
438 if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
439 return;
440
441 spin_lock_bh(&priv->stats_lock);
442 rx_packets = priv->stats.rx_packets;
443 rx_bytes = priv->stats.rx_bytes;
444 tx_packets = priv->stats.tx_packets;
445 spin_unlock_bh(&priv->stats_lock);
446
447 if (!priv->last_moder_jiffies || !period)
448 goto out;
449
450 tx_pkt_diff = ((unsigned long) (tx_packets -
451 priv->last_moder_tx_packets));
452 rx_pkt_diff = ((unsigned long) (rx_packets -
453 priv->last_moder_packets));
454 packets = max(tx_pkt_diff, rx_pkt_diff);
455 rate = packets * HZ / period;
456 avg_pkt_size = packets ? ((unsigned long) (rx_bytes -
457 priv->last_moder_bytes)) / packets : 0;
458
459 /* Apply auto-moderation only when packet rate exceeds a rate that
460 * it matters */
461 if (rate > MLX4_EN_RX_RATE_THRESH) {
462 /* If tx and rx packet rates are not balanced, assume that
463 * traffic is mainly BW bound and apply maximum moderation.
464 * Otherwise, moderate according to packet rate */
465 if (2 * tx_pkt_diff > 3 * rx_pkt_diff ||
466 2 * rx_pkt_diff > 3 * tx_pkt_diff) {
467 moder_time = priv->rx_usecs_high;
468 } else {
469 if (rate < priv->pkt_rate_low)
470 moder_time = priv->rx_usecs_low;
471 else if (rate > priv->pkt_rate_high)
472 moder_time = priv->rx_usecs_high;
473 else
474 moder_time = (rate - priv->pkt_rate_low) *
475 (priv->rx_usecs_high - priv->rx_usecs_low) /
476 (priv->pkt_rate_high - priv->pkt_rate_low) +
477 priv->rx_usecs_low;
478 }
479 } else {
480 /* When packet rate is low, use default moderation rather than
481 * 0 to prevent interrupt storms if traffic suddenly increases */
482 moder_time = priv->rx_usecs;
483 }
484
485 mlx4_dbg(INTR, priv, "tx rate:%lu rx_rate:%lu\n",
486 tx_pkt_diff * HZ / period, rx_pkt_diff * HZ / period);
487
488 mlx4_dbg(INTR, priv, "Rx moder_time changed from:%d to %d period:%lu "
489 "[jiff] packets:%lu avg_pkt_size:%lu rate:%lu [p/s])\n",
490 priv->last_moder_time, moder_time, period, packets,
491 avg_pkt_size, rate);
492
493 if (moder_time != priv->last_moder_time) {
494 priv->last_moder_time = moder_time;
495 for (i = 0; i < priv->rx_ring_num; i++) {
496 cq = &priv->rx_cq[i];
497 cq->moder_time = moder_time;
498 err = mlx4_en_set_cq_moder(priv, cq);
499 if (err) {
500 mlx4_err(mdev, "Failed modifying moderation for cq:%d "
501 "on port:%d\n", i, priv->port);
502 break;
503 }
504 }
505 }
506
507out:
508 priv->last_moder_packets = rx_packets;
509 priv->last_moder_tx_packets = tx_packets;
510 priv->last_moder_bytes = rx_bytes;
511 priv->last_moder_jiffies = jiffies;
512}
513
514static void mlx4_en_do_get_stats(struct work_struct *work)
515{
516 struct delayed_work *delay = container_of(work, struct delayed_work, work);
517 struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
518 stats_task);
519 struct mlx4_en_dev *mdev = priv->mdev;
520 int err;
521
522 err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
523 if (err)
524 mlx4_dbg(HW, priv, "Could not update stats for "
525 "port:%d\n", priv->port);
526
527 mutex_lock(&mdev->state_lock);
528 if (mdev->device_up) {
529 if (priv->port_up)
530 mlx4_en_auto_moderation(priv);
531
532 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
533 }
534 mutex_unlock(&mdev->state_lock);
535}
536
537static void mlx4_en_linkstate(struct work_struct *work)
538{
539 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
540 linkstate_task);
541 struct mlx4_en_dev *mdev = priv->mdev;
542 int linkstate = priv->link_state;
543
544 mutex_lock(&mdev->state_lock);
545 /* If observable port state changed set carrier state and
546 * report to system log */
547 if (priv->last_link_state != linkstate) {
548 if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
549 if (netif_msg_link(priv))
550 mlx4_info(mdev, "Port %d - link down\n", priv->port);
551 netif_carrier_off(priv->dev);
552 } else {
553 if (netif_msg_link(priv))
554 mlx4_info(mdev, "Port %d - link up\n", priv->port);
555 netif_carrier_on(priv->dev);
556 }
557 }
558 priv->last_link_state = linkstate;
559 mutex_unlock(&mdev->state_lock);
560}
561
562
563static int mlx4_en_start_port(struct net_device *dev)
564{
565 struct mlx4_en_priv *priv = netdev_priv(dev);
566 struct mlx4_en_dev *mdev = priv->mdev;
567 struct mlx4_en_cq *cq;
568 struct mlx4_en_tx_ring *tx_ring;
569 struct mlx4_en_rx_ring *rx_ring;
570 int rx_index = 0;
571 int tx_index = 0;
572 u16 stride;
573 int err = 0;
574 int i;
575 int j;
576
577 if (priv->port_up) {
578 mlx4_dbg(DRV, priv, "start port called while port already up\n");
579 return 0;
580 }
581
582 /* Calculate Rx buf size */
583 dev->mtu = min(dev->mtu, priv->max_mtu);
584 mlx4_en_calc_rx_buf(dev);
585 mlx4_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);
586 stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
587 DS_SIZE * priv->num_frags);
588 /* Configure rx cq's and rings */
589 for (i = 0; i < priv->rx_ring_num; i++) {
590 cq = &priv->rx_cq[i];
591 rx_ring = &priv->rx_ring[i];
592
593 err = mlx4_en_activate_cq(priv, cq);
594 if (err) {
595 mlx4_err(mdev, "Failed activating Rx CQ\n");
596 goto rx_err;
597 }
598 for (j = 0; j < cq->size; j++)
599 cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
600 err = mlx4_en_set_cq_moder(priv, cq);
601 if (err) {
602 mlx4_err(mdev, "Failed setting cq moderation parameters");
603 mlx4_en_deactivate_cq(priv, cq);
604 goto cq_err;
605 }
606 mlx4_en_arm_cq(priv, cq);
607
608 ++rx_index;
609 }
610
611 err = mlx4_en_activate_rx_rings(priv);
612 if (err) {
613 mlx4_err(mdev, "Failed to activate RX rings\n");
614 goto cq_err;
615 }
616
617 err = mlx4_en_config_rss_steer(priv);
618 if (err) {
619 mlx4_err(mdev, "Failed configuring rss steering\n");
620 goto rx_err;
621 }
622
623 /* Configure tx cq's and rings */
624 for (i = 0; i < priv->tx_ring_num; i++) {
625 /* Configure cq */
626 cq = &priv->tx_cq[i];
627 err = mlx4_en_activate_cq(priv, cq);
628 if (err) {
629 mlx4_err(mdev, "Failed allocating Tx CQ\n");
630 goto tx_err;
631 }
632 err = mlx4_en_set_cq_moder(priv, cq);
633 if (err) {
634 mlx4_err(mdev, "Failed setting cq moderation parameters");
635 mlx4_en_deactivate_cq(priv, cq);
636 goto tx_err;
637 }
638 mlx4_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i);
639 cq->buf->wqe_index = cpu_to_be16(0xffff);
640
641 /* Configure ring */
642 tx_ring = &priv->tx_ring[i];
643 err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
644 priv->rx_ring[0].srq.srqn);
645 if (err) {
646 mlx4_err(mdev, "Failed allocating Tx ring\n");
647 mlx4_en_deactivate_cq(priv, cq);
648 goto tx_err;
649 }
650 /* Set initial ownership of all Tx TXBBs to SW (1) */
651 for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
652 *((u32 *) (tx_ring->buf + j)) = 0xffffffff;
653 ++tx_index;
654 }
655
656 /* Configure port */
657 err = mlx4_SET_PORT_general(mdev->dev, priv->port,
658 priv->rx_skb_size + ETH_FCS_LEN,
659 mdev->profile.tx_pause,
660 mdev->profile.tx_ppp,
661 mdev->profile.rx_pause,
662 mdev->profile.rx_ppp);
663 if (err) {
664 mlx4_err(mdev, "Failed setting port general configurations"
665 " for port %d, with error %d\n", priv->port, err);
666 goto tx_err;
667 }
668 /* Set default qp number */
669 err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
670 if (err) {
671 mlx4_err(mdev, "Failed setting default qp numbers\n");
672 goto tx_err;
673 }
674 /* Set port mac number */
675 mlx4_dbg(DRV, priv, "Setting mac for port %d\n", priv->port);
676 err = mlx4_register_mac(mdev->dev, priv->port,
677 priv->mac, &priv->mac_index);
678 if (err) {
679 mlx4_err(mdev, "Failed setting port mac\n");
680 goto tx_err;
681 }
682
683 /* Init port */
684 mlx4_dbg(HW, priv, "Initializing port\n");
685 err = mlx4_INIT_PORT(mdev->dev, priv->port);
686 if (err) {
687 mlx4_err(mdev, "Failed Initializing port\n");
688 goto mac_err;
689 }
690
691 /* Schedule multicast task to populate multicast list */
692 queue_work(mdev->workqueue, &priv->mcast_task);
693
694 priv->port_up = true;
695 netif_start_queue(dev);
696 return 0;
697
698mac_err:
699 mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index);
700tx_err:
701 while (tx_index--) {
702 mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]);
703 mlx4_en_deactivate_cq(priv, &priv->tx_cq[tx_index]);
704 }
705
706 mlx4_en_release_rss_steer(priv);
707rx_err:
708 for (i = 0; i < priv->rx_ring_num; i++)
709 mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[rx_index]);
710cq_err:
711 while (rx_index--)
712 mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
713
714 return err; /* need to close devices */
715}
716
717
718static void mlx4_en_stop_port(struct net_device *dev)
719{
720 struct mlx4_en_priv *priv = netdev_priv(dev);
721 struct mlx4_en_dev *mdev = priv->mdev;
722 int i;
723
724 if (!priv->port_up) {
725 mlx4_dbg(DRV, priv, "stop port (%d) called while port already down\n",
726 priv->port);
727 return;
728 }
729 netif_stop_queue(dev);
730
731 /* Synchronize with tx routine */
732 netif_tx_lock_bh(dev);
733 priv->port_up = false;
734 netif_tx_unlock_bh(dev);
735
736 /* close port*/
737 mlx4_CLOSE_PORT(mdev->dev, priv->port);
738
739 /* Unregister Mac address for the port */
740 mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index);
741
742 /* Free TX Rings */
743 for (i = 0; i < priv->tx_ring_num; i++) {
744 mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[i]);
745 mlx4_en_deactivate_cq(priv, &priv->tx_cq[i]);
746 }
747 msleep(10);
748
749 for (i = 0; i < priv->tx_ring_num; i++)
750 mlx4_en_free_tx_buf(dev, &priv->tx_ring[i]);
751
752 /* Free RSS qps */
753 mlx4_en_release_rss_steer(priv);
754
755 /* Free RX Rings */
756 for (i = 0; i < priv->rx_ring_num; i++) {
757 mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
758 while (test_bit(NAPI_STATE_SCHED, &priv->rx_cq[i].napi.state))
759 msleep(1);
760 mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]);
761 }
762}
763
764static void mlx4_en_restart(struct work_struct *work)
765{
766 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
767 watchdog_task);
768 struct mlx4_en_dev *mdev = priv->mdev;
769 struct net_device *dev = priv->dev;
770
771 mlx4_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);
772 mlx4_en_stop_port(dev);
773 if (mlx4_en_start_port(dev))
774 mlx4_err(mdev, "Failed restarting port %d\n", priv->port);
775}
776
777
778static int mlx4_en_open(struct net_device *dev)
779{
780 struct mlx4_en_priv *priv = netdev_priv(dev);
781 struct mlx4_en_dev *mdev = priv->mdev;
782 int i;
783 int err = 0;
784
785 mutex_lock(&mdev->state_lock);
786
787 if (!mdev->device_up) {
788 mlx4_err(mdev, "Cannot open - device down/disabled\n");
789 err = -EBUSY;
790 goto out;
791 }
792
793 /* Reset HW statistics and performance counters */
794 if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
795 mlx4_dbg(HW, priv, "Failed dumping statistics\n");
796
797 memset(&priv->stats, 0, sizeof(priv->stats));
798 memset(&priv->pstats, 0, sizeof(priv->pstats));
799
800 for (i = 0; i < priv->tx_ring_num; i++) {
801 priv->tx_ring[i].bytes = 0;
802 priv->tx_ring[i].packets = 0;
803 }
804 for (i = 0; i < priv->rx_ring_num; i++) {
805 priv->rx_ring[i].bytes = 0;
806 priv->rx_ring[i].packets = 0;
807 }
808
809 mlx4_en_set_default_moderation(priv);
810 err = mlx4_en_start_port(dev);
811 if (err)
812 mlx4_err(mdev, "Failed starting port:%d\n", priv->port);
813
814out:
815 mutex_unlock(&mdev->state_lock);
816 return err;
817}
818
819
820static int mlx4_en_close(struct net_device *dev)
821{
822 struct mlx4_en_priv *priv = netdev_priv(dev);
823 struct mlx4_en_dev *mdev = priv->mdev;
824
825 if (netif_msg_ifdown(priv))
826 mlx4_info(mdev, "Close called for port:%d\n", priv->port);
827
828 mutex_lock(&mdev->state_lock);
829
830 mlx4_en_stop_port(dev);
831 netif_carrier_off(dev);
832
833 mutex_unlock(&mdev->state_lock);
834 return 0;
835}
836
837static void mlx4_en_free_resources(struct mlx4_en_priv *priv)
838{
839 int i;
840
841 for (i = 0; i < priv->tx_ring_num; i++) {
842 if (priv->tx_ring[i].tx_info)
843 mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
844 if (priv->tx_cq[i].buf)
845 mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
846 }
847
848 for (i = 0; i < priv->rx_ring_num; i++) {
849 if (priv->rx_ring[i].rx_info)
850 mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i]);
851 if (priv->rx_cq[i].buf)
852 mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
853 }
854}
855
856static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
857{
858 struct mlx4_en_dev *mdev = priv->mdev;
859 struct mlx4_en_port_profile *prof = priv->prof;
860 int i;
861
862 /* Create tx Rings */
863 for (i = 0; i < priv->tx_ring_num; i++) {
864 if (mlx4_en_create_cq(priv, &priv->tx_cq[i],
865 prof->tx_ring_size, i, TX))
866 goto err;
867
868 if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i],
869 prof->tx_ring_size, TXBB_SIZE))
870 goto err;
871 }
872
873 /* Create rx Rings */
874 for (i = 0; i < priv->rx_ring_num; i++) {
875 if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
876 prof->rx_ring_size, i, RX))
877 goto err;
878
879 if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
880 prof->rx_ring_size, priv->stride))
881 goto err;
882 }
883
884 return 0;
885
886err:
887 mlx4_err(mdev, "Failed to allocate NIC resources\n");
888 return -ENOMEM;
889}
890
891
892void mlx4_en_destroy_netdev(struct net_device *dev)
893{
894 struct mlx4_en_priv *priv = netdev_priv(dev);
895 struct mlx4_en_dev *mdev = priv->mdev;
896
897 mlx4_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
898
899 /* Unregister device - this will close the port if it was up */
900 if (priv->registered)
901 unregister_netdev(dev);
902
903 if (priv->allocated)
904 mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);
905
906 cancel_delayed_work(&priv->stats_task);
907 cancel_delayed_work(&priv->refill_task);
908 /* flush any pending task for this netdev */
909 flush_workqueue(mdev->workqueue);
910
911 /* Detach the netdev so tasks would not attempt to access it */
912 mutex_lock(&mdev->state_lock);
913 mdev->pndev[priv->port] = NULL;
914 mutex_unlock(&mdev->state_lock);
915
916 mlx4_en_free_resources(priv);
917 free_netdev(dev);
918}
919
920static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
921{
922 struct mlx4_en_priv *priv = netdev_priv(dev);
923 struct mlx4_en_dev *mdev = priv->mdev;
924 int err = 0;
925
926 mlx4_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
927 dev->mtu, new_mtu);
928
929 if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) {
930 mlx4_err(mdev, "Bad MTU size:%d.\n", new_mtu);
931 return -EPERM;
932 }
933 dev->mtu = new_mtu;
934
935 if (netif_running(dev)) {
936 mutex_lock(&mdev->state_lock);
937 if (!mdev->device_up) {
938 /* NIC is probably restarting - let watchdog task reset
939 * the port */
940 mlx4_dbg(DRV, priv, "Change MTU called with card down!?\n");
941 } else {
942 mlx4_en_stop_port(dev);
943 mlx4_en_set_default_moderation(priv);
944 err = mlx4_en_start_port(dev);
945 if (err) {
946 mlx4_err(mdev, "Failed restarting port:%d\n",
947 priv->port);
948 queue_work(mdev->workqueue, &priv->watchdog_task);
949 }
950 }
951 mutex_unlock(&mdev->state_lock);
952 }
953 return 0;
954}
955
956int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
957 struct mlx4_en_port_profile *prof)
958{
959 struct net_device *dev;
960 struct mlx4_en_priv *priv;
961 int i;
962 int err;
963
964 dev = alloc_etherdev(sizeof(struct mlx4_en_priv));
965 if (dev == NULL) {
966 mlx4_err(mdev, "Net device allocation failed\n");
967 return -ENOMEM;
968 }
969
970 SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev);
971
972 /*
973 * Initialize driver private data
974 */
975
976 priv = netdev_priv(dev);
977 memset(priv, 0, sizeof(struct mlx4_en_priv));
978 priv->dev = dev;
979 priv->mdev = mdev;
980 priv->prof = prof;
981 priv->port = port;
982 priv->port_up = false;
983 priv->rx_csum = 1;
984 priv->flags = prof->flags;
985 priv->tx_ring_num = prof->tx_ring_num;
986 priv->rx_ring_num = prof->rx_ring_num;
987 priv->mc_list = NULL;
988 priv->mac_index = -1;
989 priv->msg_enable = MLX4_EN_MSG_LEVEL;
990 spin_lock_init(&priv->stats_lock);
991 INIT_WORK(&priv->mcast_task, mlx4_en_do_set_multicast);
992 INIT_WORK(&priv->mac_task, mlx4_en_do_set_mac);
993 INIT_DELAYED_WORK(&priv->refill_task, mlx4_en_rx_refill);
994 INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
995 INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
996 INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
997
998 /* Query for default mac and max mtu */
999 priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
1000 priv->mac = mdev->dev->caps.def_mac[priv->port];
1001 if (ILLEGAL_MAC(priv->mac)) {
1002 mlx4_err(mdev, "Port: %d, invalid mac burned: 0x%llx, quiting\n",
1003 priv->port, priv->mac);
1004 err = -EINVAL;
1005 goto out;
1006 }
1007
1008 priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
1009 DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
1010 err = mlx4_en_alloc_resources(priv);
1011 if (err)
1012 goto out;
1013
1014 /* Populate Rx default RSS mappings */
1015 mlx4_en_set_default_rss_map(priv, &priv->rss_map, priv->rx_ring_num *
1016 RSS_FACTOR, priv->rx_ring_num);
1017 /* Allocate page for receive rings */
1018 err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
1019 MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
1020 if (err) {
1021 mlx4_err(mdev, "Failed to allocate page for rx qps\n");
1022 goto out;
1023 }
1024 priv->allocated = 1;
1025
1026 /* Populate Tx priority mappings */
1027 mlx4_en_set_prio_map(priv, priv->tx_prio_map, prof->tx_ring_num);
1028
1029 /*
1030 * Initialize netdev entry points
1031 */
1032
1033 dev->open = &mlx4_en_open;
1034 dev->stop = &mlx4_en_close;
1035 dev->hard_start_xmit = &mlx4_en_xmit;
1036 dev->get_stats = &mlx4_en_get_stats;
1037 dev->set_multicast_list = &mlx4_en_set_multicast;
1038 dev->set_mac_address = &mlx4_en_set_mac;
1039 dev->change_mtu = &mlx4_en_change_mtu;
1040 dev->tx_timeout = &mlx4_en_tx_timeout;
1041 dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
1042 dev->vlan_rx_register = mlx4_en_vlan_rx_register;
1043 dev->vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid;
1044 dev->vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid;
1045#ifdef CONFIG_NET_POLL_CONTROLLER
1046 dev->poll_controller = mlx4_en_netpoll;
1047#endif
1048 SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);
1049
1050 /* Set defualt MAC */
1051 dev->addr_len = ETH_ALEN;
1052 for (i = 0; i < ETH_ALEN; i++)
1053 dev->dev_addr[ETH_ALEN - 1 - i] =
1054 (u8) (priv->mac >> (8 * i));
1055
1056 /*
1057 * Set driver features
1058 */
1059 dev->features |= NETIF_F_SG;
1060 dev->features |= NETIF_F_HW_CSUM;
1061 dev->features |= NETIF_F_HIGHDMA;
1062 dev->features |= NETIF_F_HW_VLAN_TX |
1063 NETIF_F_HW_VLAN_RX |
1064 NETIF_F_HW_VLAN_FILTER;
1065 if (mdev->profile.num_lro)
1066 dev->features |= NETIF_F_LRO;
1067 if (mdev->LSO_support) {
1068 dev->features |= NETIF_F_TSO;
1069 dev->features |= NETIF_F_TSO6;
1070 }
1071
1072 mdev->pndev[port] = dev;
1073
1074 netif_carrier_off(dev);
1075 err = register_netdev(dev);
1076 if (err) {
1077 mlx4_err(mdev, "Netdev registration failed\n");
1078 goto out;
1079 }
1080 priv->registered = 1;
1081 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
1082 return 0;
1083
1084out:
1085 mlx4_en_destroy_netdev(dev);
1086 return err;
1087}
1088
diff --git a/drivers/net/mlx4/en_params.c b/drivers/net/mlx4/en_params.c
new file mode 100644
index 000000000000..c2e69b1bcd0a
--- /dev/null
+++ b/drivers/net/mlx4/en_params.c
@@ -0,0 +1,480 @@
1/*
2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#include <linux/kernel.h>
35#include <linux/ethtool.h>
36#include <linux/netdevice.h>
37
38#include "mlx4_en.h"
39#include "en_port.h"
40
41#define MLX4_EN_PARM_INT(X, def_val, desc) \
42 static unsigned int X = def_val;\
43 module_param(X , uint, 0444); \
44 MODULE_PARM_DESC(X, desc);
45
46
47/*
48 * Device scope module parameters
49 */
50
51
52/* Use a XOR rathern than Toeplitz hash function for RSS */
53MLX4_EN_PARM_INT(rss_xor, 0, "Use XOR hash function for RSS");
54
55/* RSS hash type mask - default to <saddr, daddr, sport, dport> */
56MLX4_EN_PARM_INT(rss_mask, 0xf, "RSS hash type bitmask");
57
58/* Number of LRO sessions per Rx ring (rounded up to a power of two) */
59MLX4_EN_PARM_INT(num_lro, MLX4_EN_MAX_LRO_DESCRIPTORS,
60 "Number of LRO sessions per ring or disabled (0)");
61
62/* Priority pausing */
63MLX4_EN_PARM_INT(pptx, MLX4_EN_DEF_TX_PAUSE,
64 "Pause policy on TX: 0 never generate pause frames "
65 "1 generate pause frames according to RX buffer threshold");
66MLX4_EN_PARM_INT(pprx, MLX4_EN_DEF_RX_PAUSE,
67 "Pause policy on RX: 0 ignore received pause frames "
68 "1 respect received pause frames");
69MLX4_EN_PARM_INT(pfctx, 0, "Priority based Flow Control policy on TX[7:0]."
70 " Per priority bit mask");
71MLX4_EN_PARM_INT(pfcrx, 0, "Priority based Flow Control policy on RX[7:0]."
72 " Per priority bit mask");
73
74/* Interrupt moderation tunning */
75MLX4_EN_PARM_INT(rx_moder_cnt, MLX4_EN_AUTO_CONF,
76 "Max coalesced descriptors for Rx interrupt moderation");
77MLX4_EN_PARM_INT(rx_moder_time, MLX4_EN_AUTO_CONF,
78 "Timeout following last packet for Rx interrupt moderation");
79MLX4_EN_PARM_INT(auto_moder, 1, "Enable dynamic interrupt moderation");
80
81MLX4_EN_PARM_INT(rx_ring_num1, 0, "Number or Rx rings for port 1 (0 = #cores)");
82MLX4_EN_PARM_INT(rx_ring_num2, 0, "Number or Rx rings for port 2 (0 = #cores)");
83
84MLX4_EN_PARM_INT(tx_ring_size1, MLX4_EN_AUTO_CONF, "Tx ring size for port 1");
85MLX4_EN_PARM_INT(tx_ring_size2, MLX4_EN_AUTO_CONF, "Tx ring size for port 2");
86MLX4_EN_PARM_INT(rx_ring_size1, MLX4_EN_AUTO_CONF, "Rx ring size for port 1");
87MLX4_EN_PARM_INT(rx_ring_size2, MLX4_EN_AUTO_CONF, "Rx ring size for port 2");
88
89
90int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
91{
92 struct mlx4_en_profile *params = &mdev->profile;
93
94 params->rx_moder_cnt = min_t(int, rx_moder_cnt, MLX4_EN_AUTO_CONF);
95 params->rx_moder_time = min_t(int, rx_moder_time, MLX4_EN_AUTO_CONF);
96 params->auto_moder = auto_moder;
97 params->rss_xor = (rss_xor != 0);
98 params->rss_mask = rss_mask & 0x1f;
99 params->num_lro = min_t(int, num_lro , MLX4_EN_MAX_LRO_DESCRIPTORS);
100 params->rx_pause = pprx;
101 params->rx_ppp = pfcrx;
102 params->tx_pause = pptx;
103 params->tx_ppp = pfctx;
104 if (params->rx_ppp || params->tx_ppp) {
105 params->prof[1].tx_ring_num = MLX4_EN_TX_RING_NUM;
106 params->prof[2].tx_ring_num = MLX4_EN_TX_RING_NUM;
107 } else {
108 params->prof[1].tx_ring_num = 1;
109 params->prof[2].tx_ring_num = 1;
110 }
111 params->prof[1].rx_ring_num = min_t(int, rx_ring_num1, MAX_RX_RINGS);
112 params->prof[2].rx_ring_num = min_t(int, rx_ring_num2, MAX_RX_RINGS);
113
114 if (tx_ring_size1 == MLX4_EN_AUTO_CONF)
115 tx_ring_size1 = MLX4_EN_DEF_TX_RING_SIZE;
116 params->prof[1].tx_ring_size =
117 (tx_ring_size1 < MLX4_EN_MIN_TX_SIZE) ?
118 MLX4_EN_MIN_TX_SIZE : roundup_pow_of_two(tx_ring_size1);
119
120 if (tx_ring_size2 == MLX4_EN_AUTO_CONF)
121 tx_ring_size2 = MLX4_EN_DEF_TX_RING_SIZE;
122 params->prof[2].tx_ring_size =
123 (tx_ring_size2 < MLX4_EN_MIN_TX_SIZE) ?
124 MLX4_EN_MIN_TX_SIZE : roundup_pow_of_two(tx_ring_size2);
125
126 if (rx_ring_size1 == MLX4_EN_AUTO_CONF)
127 rx_ring_size1 = MLX4_EN_DEF_RX_RING_SIZE;
128 params->prof[1].rx_ring_size =
129 (rx_ring_size1 < MLX4_EN_MIN_RX_SIZE) ?
130 MLX4_EN_MIN_RX_SIZE : roundup_pow_of_two(rx_ring_size1);
131
132 if (rx_ring_size2 == MLX4_EN_AUTO_CONF)
133 rx_ring_size2 = MLX4_EN_DEF_RX_RING_SIZE;
134 params->prof[2].rx_ring_size =
135 (rx_ring_size2 < MLX4_EN_MIN_RX_SIZE) ?
136 MLX4_EN_MIN_RX_SIZE : roundup_pow_of_two(rx_ring_size2);
137 return 0;
138}
139
140
141/*
142 * Ethtool support
143 */
144
145static void mlx4_en_update_lro_stats(struct mlx4_en_priv *priv)
146{
147 int i;
148
149 priv->port_stats.lro_aggregated = 0;
150 priv->port_stats.lro_flushed = 0;
151 priv->port_stats.lro_no_desc = 0;
152
153 for (i = 0; i < priv->rx_ring_num; i++) {
154 priv->port_stats.lro_aggregated += priv->rx_ring[i].lro.stats.aggregated;
155 priv->port_stats.lro_flushed += priv->rx_ring[i].lro.stats.flushed;
156 priv->port_stats.lro_no_desc += priv->rx_ring[i].lro.stats.no_desc;
157 }
158}
159
160static void
161mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
162{
163 struct mlx4_en_priv *priv = netdev_priv(dev);
164 struct mlx4_en_dev *mdev = priv->mdev;
165
166 sprintf(drvinfo->driver, DRV_NAME " (%s)", mdev->dev->board_id);
167 strncpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")", 32);
168 sprintf(drvinfo->fw_version, "%d.%d.%d",
169 (u16) (mdev->dev->caps.fw_ver >> 32),
170 (u16) ((mdev->dev->caps.fw_ver >> 16) & 0xffff),
171 (u16) (mdev->dev->caps.fw_ver & 0xffff));
172 strncpy(drvinfo->bus_info, pci_name(mdev->dev->pdev), 32);
173 drvinfo->n_stats = 0;
174 drvinfo->regdump_len = 0;
175 drvinfo->eedump_len = 0;
176}
177
178static u32 mlx4_en_get_tso(struct net_device *dev)
179{
180 return (dev->features & NETIF_F_TSO) != 0;
181}
182
183static int mlx4_en_set_tso(struct net_device *dev, u32 data)
184{
185 struct mlx4_en_priv *priv = netdev_priv(dev);
186
187 if (data) {
188 if (!priv->mdev->LSO_support)
189 return -EPERM;
190 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
191 } else
192 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
193 return 0;
194}
195
196static u32 mlx4_en_get_rx_csum(struct net_device *dev)
197{
198 struct mlx4_en_priv *priv = netdev_priv(dev);
199 return priv->rx_csum;
200}
201
202static int mlx4_en_set_rx_csum(struct net_device *dev, u32 data)
203{
204 struct mlx4_en_priv *priv = netdev_priv(dev);
205 priv->rx_csum = (data != 0);
206 return 0;
207}
208
209static const char main_strings[][ETH_GSTRING_LEN] = {
210 "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
211 "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
212 "rx_length_errors", "rx_over_errors", "rx_crc_errors",
213 "rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
214 "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
215 "tx_heartbeat_errors", "tx_window_errors",
216
217 /* port statistics */
218 "lro_aggregated", "lro_flushed", "lro_no_desc", "tso_packets",
219 "queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_failed",
220 "rx_csum_good", "rx_csum_none", "tx_chksum_offload",
221
222 /* packet statistics */
223 "broadcast", "rx_prio_0", "rx_prio_1", "rx_prio_2", "rx_prio_3",
224 "rx_prio_4", "rx_prio_5", "rx_prio_6", "rx_prio_7", "tx_prio_0",
225 "tx_prio_1", "tx_prio_2", "tx_prio_3", "tx_prio_4", "tx_prio_5",
226 "tx_prio_6", "tx_prio_7",
227};
228#define NUM_MAIN_STATS 21
229#define NUM_ALL_STATS (NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + NUM_PERF_STATS)
230
231static u32 mlx4_en_get_msglevel(struct net_device *dev)
232{
233 return ((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable;
234}
235
236static void mlx4_en_set_msglevel(struct net_device *dev, u32 val)
237{
238 ((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable = val;
239}
240
241static void mlx4_en_get_wol(struct net_device *netdev,
242 struct ethtool_wolinfo *wol)
243{
244 wol->supported = 0;
245 wol->wolopts = 0;
246
247 return;
248}
249
250static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
251{
252 struct mlx4_en_priv *priv = netdev_priv(dev);
253
254 if (sset != ETH_SS_STATS)
255 return -EOPNOTSUPP;
256
257 return NUM_ALL_STATS + (priv->tx_ring_num + priv->rx_ring_num) * 2;
258}
259
260static void mlx4_en_get_ethtool_stats(struct net_device *dev,
261 struct ethtool_stats *stats, uint64_t *data)
262{
263 struct mlx4_en_priv *priv = netdev_priv(dev);
264 int index = 0;
265 int i;
266
267 spin_lock_bh(&priv->stats_lock);
268
269 mlx4_en_update_lro_stats(priv);
270
271 for (i = 0; i < NUM_MAIN_STATS; i++)
272 data[index++] = ((unsigned long *) &priv->stats)[i];
273 for (i = 0; i < NUM_PORT_STATS; i++)
274 data[index++] = ((unsigned long *) &priv->port_stats)[i];
275 for (i = 0; i < priv->tx_ring_num; i++) {
276 data[index++] = priv->tx_ring[i].packets;
277 data[index++] = priv->tx_ring[i].bytes;
278 }
279 for (i = 0; i < priv->rx_ring_num; i++) {
280 data[index++] = priv->rx_ring[i].packets;
281 data[index++] = priv->rx_ring[i].bytes;
282 }
283 for (i = 0; i < NUM_PKT_STATS; i++)
284 data[index++] = ((unsigned long *) &priv->pkstats)[i];
285 spin_unlock_bh(&priv->stats_lock);
286
287}
288
289static void mlx4_en_get_strings(struct net_device *dev,
290 uint32_t stringset, uint8_t *data)
291{
292 struct mlx4_en_priv *priv = netdev_priv(dev);
293 int index = 0;
294 int i;
295
296 if (stringset != ETH_SS_STATS)
297 return;
298
299 /* Add main counters */
300 for (i = 0; i < NUM_MAIN_STATS; i++)
301 strcpy(data + (index++) * ETH_GSTRING_LEN, main_strings[i]);
302 for (i = 0; i < NUM_PORT_STATS; i++)
303 strcpy(data + (index++) * ETH_GSTRING_LEN,
304 main_strings[i + NUM_MAIN_STATS]);
305 for (i = 0; i < priv->tx_ring_num; i++) {
306 sprintf(data + (index++) * ETH_GSTRING_LEN,
307 "tx%d_packets", i);
308 sprintf(data + (index++) * ETH_GSTRING_LEN,
309 "tx%d_bytes", i);
310 }
311 for (i = 0; i < priv->rx_ring_num; i++) {
312 sprintf(data + (index++) * ETH_GSTRING_LEN,
313 "rx%d_packets", i);
314 sprintf(data + (index++) * ETH_GSTRING_LEN,
315 "rx%d_bytes", i);
316 }
317 for (i = 0; i < NUM_PKT_STATS; i++)
318 strcpy(data + (index++) * ETH_GSTRING_LEN,
319 main_strings[i + NUM_MAIN_STATS + NUM_PORT_STATS]);
320}
321
322static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
323{
324 cmd->autoneg = AUTONEG_DISABLE;
325 cmd->supported = SUPPORTED_10000baseT_Full;
326 cmd->advertising = SUPPORTED_10000baseT_Full;
327 if (netif_carrier_ok(dev)) {
328 cmd->speed = SPEED_10000;
329 cmd->duplex = DUPLEX_FULL;
330 } else {
331 cmd->speed = -1;
332 cmd->duplex = -1;
333 }
334 return 0;
335}
336
337static int mlx4_en_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
338{
339 if ((cmd->autoneg == AUTONEG_ENABLE) ||
340 (cmd->speed != SPEED_10000) || (cmd->duplex != DUPLEX_FULL))
341 return -EINVAL;
342
343 /* Nothing to change */
344 return 0;
345}
346
347static int mlx4_en_get_coalesce(struct net_device *dev,
348 struct ethtool_coalesce *coal)
349{
350 struct mlx4_en_priv *priv = netdev_priv(dev);
351
352 coal->tx_coalesce_usecs = 0;
353 coal->tx_max_coalesced_frames = 0;
354 coal->rx_coalesce_usecs = priv->rx_usecs;
355 coal->rx_max_coalesced_frames = priv->rx_frames;
356
357 coal->pkt_rate_low = priv->pkt_rate_low;
358 coal->rx_coalesce_usecs_low = priv->rx_usecs_low;
359 coal->pkt_rate_high = priv->pkt_rate_high;
360 coal->rx_coalesce_usecs_high = priv->rx_usecs_high;
361 coal->rate_sample_interval = priv->sample_interval;
362 coal->use_adaptive_rx_coalesce = priv->adaptive_rx_coal;
363 return 0;
364}
365
/* ethtool set_coalesce handler: record the requested RX interrupt
 * moderation parameters and, in static mode, program them into every
 * RX completion queue immediately. */
static int mlx4_en_set_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int err, i;

	/* MLX4_EN_AUTO_CONF selects driver defaults: a frame count sized
	 * so roughly MLX4_EN_RX_COAL_TARGET bytes accumulate per
	 * interrupt at the current MTU, and the default moderation time. */
	priv->rx_frames = (coal->rx_max_coalesced_frames ==
			   MLX4_EN_AUTO_CONF) ?
				MLX4_EN_RX_COAL_TARGET /
				priv->dev->mtu + 1 :
				coal->rx_max_coalesced_frames;
	priv->rx_usecs = (coal->rx_coalesce_usecs ==
			  MLX4_EN_AUTO_CONF) ?
				MLX4_EN_RX_COAL_TIME :
				coal->rx_coalesce_usecs;

	/* Set adaptive coalescing params */
	priv->pkt_rate_low = coal->pkt_rate_low;
	priv->rx_usecs_low = coal->rx_coalesce_usecs_low;
	priv->pkt_rate_high = coal->pkt_rate_high;
	priv->rx_usecs_high = coal->rx_coalesce_usecs_high;
	priv->sample_interval = coal->rate_sample_interval;
	priv->adaptive_rx_coal = coal->use_adaptive_rx_coalesce;
	/* Invalidate the last applied value so the adaptive algorithm
	 * reprograms moderation on its next run. */
	priv->last_moder_time = MLX4_EN_AUTO_CONF;
	if (priv->adaptive_rx_coal)
		return 0;	/* adaptive mode: CQs presumably reprogrammed
				 * by the adaptive path, not here */

	/* Static mode: push the new values to every RX CQ now. */
	for (i = 0; i < priv->rx_ring_num; i++) {
		priv->rx_cq[i].moder_cnt = priv->rx_frames;
		priv->rx_cq[i].moder_time = priv->rx_usecs;
		err = mlx4_en_set_cq_moder(priv, &priv->rx_cq[i]);
		if (err)
			return err;
	}
	return 0;
}
402
403static int mlx4_en_set_pauseparam(struct net_device *dev,
404 struct ethtool_pauseparam *pause)
405{
406 struct mlx4_en_priv *priv = netdev_priv(dev);
407 struct mlx4_en_dev *mdev = priv->mdev;
408 int err;
409
410 mdev->profile.tx_pause = pause->tx_pause != 0;
411 mdev->profile.rx_pause = pause->rx_pause != 0;
412 err = mlx4_SET_PORT_general(mdev->dev, priv->port,
413 priv->rx_skb_size + ETH_FCS_LEN,
414 mdev->profile.tx_pause,
415 mdev->profile.tx_ppp,
416 mdev->profile.rx_pause,
417 mdev->profile.rx_ppp);
418 if (err)
419 mlx4_err(mdev, "Failed setting pause params to\n");
420
421 return err;
422}
423
424static void mlx4_en_get_pauseparam(struct net_device *dev,
425 struct ethtool_pauseparam *pause)
426{
427 struct mlx4_en_priv *priv = netdev_priv(dev);
428 struct mlx4_en_dev *mdev = priv->mdev;
429
430 pause->tx_pause = mdev->profile.tx_pause;
431 pause->rx_pause = mdev->profile.rx_pause;
432}
433
/* ethtool get_ringparam handler: report the configured RX/TX ring
 * sizes from the per-port profile. */
static void mlx4_en_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *param)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	memset(param, 0, sizeof(*param));
	/* NOTE(review): the maxima are taken from the device's
	 * scatter/gather entry caps (max_rq_sg/max_sq_sg), which does
	 * not look like a ring-size limit — confirm against the
	 * firmware capability documentation. */
	param->rx_max_pending = mdev->dev->caps.max_rq_sg;
	param->tx_max_pending = mdev->dev->caps.max_sq_sg;
	param->rx_pending = mdev->profile.prof[priv->port].rx_ring_size;
	param->tx_pending = mdev->profile.prof[priv->port].tx_ring_size;
}
446
/* ethtool entry points for mlx4_en netdevs.  Handlers not defined in
 * this file (drvinfo, csum, stats, wol, msglevel) are implemented
 * elsewhere in the driver; generic ethtool_op_* helpers come from the
 * networking core. */
const struct ethtool_ops mlx4_en_ethtool_ops = {
	.get_drvinfo = mlx4_en_get_drvinfo,
	.get_settings = mlx4_en_get_settings,
	.set_settings = mlx4_en_set_settings,
#ifdef NETIF_F_TSO
	/* TSO controls are exposed only when the kernel supports TSO */
	.get_tso = mlx4_en_get_tso,
	.set_tso = mlx4_en_set_tso,
#endif
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
	.get_link = ethtool_op_get_link,
	.get_rx_csum = mlx4_en_get_rx_csum,
	.set_rx_csum = mlx4_en_set_rx_csum,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = ethtool_op_set_tx_ipv6_csum,
	.get_strings = mlx4_en_get_strings,
	.get_sset_count = mlx4_en_get_sset_count,
	.get_ethtool_stats = mlx4_en_get_ethtool_stats,
	.get_wol = mlx4_en_get_wol,
	.get_msglevel = mlx4_en_get_msglevel,
	.set_msglevel = mlx4_en_set_msglevel,
	.get_coalesce = mlx4_en_get_coalesce,
	.set_coalesce = mlx4_en_set_coalesce,
	.get_pauseparam = mlx4_en_get_pauseparam,
	.set_pauseparam = mlx4_en_set_pauseparam,
	.get_ringparam = mlx4_en_get_ringparam,
	.get_flags = ethtool_op_get_flags,
	.set_flags = ethtool_op_set_flags,
};
476
477
478
479
480
diff --git a/drivers/net/mlx4/en_port.c b/drivers/net/mlx4/en_port.c
new file mode 100644
index 000000000000..c5a4c0389752
--- /dev/null
+++ b/drivers/net/mlx4/en_port.c
@@ -0,0 +1,261 @@
1/*
2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34
35#include <linux/if_vlan.h>
36
37#include <linux/mlx4/device.h>
38#include <linux/mlx4/cmd.h>
39
40#include "en_port.h"
41#include "mlx4_en.h"
42
43
/* Program the port's multicast filter via firmware.  The 64-bit
 * immediate packs the MAC address in the low bits with bit 63 acting
 * as a "clear" flag; 'mode' is one of the MLX4_MCAST_* values and is
 * passed through as the command's op modifier. */
int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port,
			u64 mac, u64 clear, u8 mode)
{
	return mlx4_cmd(dev, (mac | (clear << 63)), port, mode,
			MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B);
}
50
51int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, u8 port, struct vlan_group *grp)
52{
53 struct mlx4_cmd_mailbox *mailbox;
54 struct mlx4_set_vlan_fltr_mbox *filter;
55 int i;
56 int j;
57 int index = 0;
58 u32 entry;
59 int err = 0;
60
61 mailbox = mlx4_alloc_cmd_mailbox(dev);
62 if (IS_ERR(mailbox))
63 return PTR_ERR(mailbox);
64
65 filter = mailbox->buf;
66 if (grp) {
67 memset(filter, 0, sizeof *filter);
68 for (i = VLAN_FLTR_SIZE - 1; i >= 0; i--) {
69 entry = 0;
70 for (j = 0; j < 32; j++)
71 if (vlan_group_get_device(grp, index++))
72 entry |= 1 << j;
73 filter->entry[i] = cpu_to_be32(entry);
74 }
75 } else {
76 /* When no vlans are configured we block all vlans */
77 memset(filter, 0, sizeof(*filter));
78 }
79 err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_VLAN_FLTR,
80 MLX4_CMD_TIME_CLASS_B);
81 mlx4_free_cmd_mailbox(dev, mailbox);
82 return err;
83}
84
85
/* Configure general port parameters (MTU and flow control) through the
 * SET_PORT firmware command.
 *
 * @pptx/@pprx:   global pause enable for TX/RX
 * @pfctx/@pfcrx: per-priority (PFC) pause bitmasks for TX/RX
 *
 * Global pause and per-priority pause are mutually exclusive: the
 * global enable bit is suppressed whenever the corresponding PFC mask
 * is non-zero.
 */
int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
			  u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_set_port_general_context *context;
	int err;
	u32 in_mod;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	context = mailbox->buf;
	memset(context, 0, sizeof *context);

	context->flags = SET_PORT_GEN_ALL_VALID;
	context->mtu = cpu_to_be16(mtu);
	/* global pause enable lives in bit 7; forced off when PFC is used */
	context->pptx = (pptx * (!pfctx)) << 7;
	context->pfctx = pfctx;
	context->pprx = (pprx * (!pfcrx)) << 7;
	context->pfcrx = pfcrx;

	/* input modifier: context type in bits 15:8, port number in 7:0 */
	in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
114
/* Configure the port's RX QP steering context: base QP number,
 * promiscuous mode, and the default QP indices used for frames that
 * miss the MAC/vlan steering tables. */
int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
			   u8 promisc)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_set_port_rqp_calc_context *context;
	int err;
	u32 in_mod;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	context = mailbox->buf;
	memset(context, 0, sizeof *context);

	context->base_qpn = cpu_to_be32(base_qpn);
	/* promiscuous enable flag (bit 31) combined with the base QPN */
	context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT | base_qpn);
	/* multicast steering always enabled (bit 31 set) */
	context->mcast = cpu_to_be32(1 << SET_PORT_PROMISC_SHIFT | base_qpn);
	context->intra_no_vlan = 0;
	context->no_vlan = MLX4_NO_VLAN_IDX;
	context->intra_vlan_miss = 0;
	context->vlan_miss = MLX4_VLAN_MISS_IDX;

	/* input modifier: context type in bits 15:8, port number in 7:0 */
	in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port;
	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
144
145
/* Fetch (and optionally reset, when @reset is non-zero) the hardware
 * ethernet counters for @port via the DUMP_ETH_STATS firmware command,
 * then fold them into the netdev statistics and the per-priority
 * software counters under priv->stats_lock. */
int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
{
	struct mlx4_en_stat_out_mbox *mlx4_en_stats;
	struct mlx4_en_priv *priv = netdev_priv(mdev->pndev[port]);
	struct net_device_stats *stats = &priv->stats;
	struct mlx4_cmd_mailbox *mailbox;
	u64 in_mod = reset << 8 | port;	/* reset flag in bits 15:8, port in 7:0 */
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	memset(mailbox->buf, 0, sizeof(*mlx4_en_stats));
	err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0,
			   MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B);
	if (err)
		goto out;

	mlx4_en_stats = mailbox->buf;

	spin_lock_bh(&priv->stats_lock);

	/* RX packets: total good frames minus dropped ones. */
	stats->rx_packets = be32_to_cpu(mlx4_en_stats->RTOTFRMS) -
			    be32_to_cpu(mlx4_en_stats->RDROP);
	/* TX packets: sum over all priorities plus novlan and loopback. */
	stats->tx_packets = be64_to_cpu(mlx4_en_stats->TTOT_prio_0) +
			    be64_to_cpu(mlx4_en_stats->TTOT_prio_1) +
			    be64_to_cpu(mlx4_en_stats->TTOT_prio_2) +
			    be64_to_cpu(mlx4_en_stats->TTOT_prio_3) +
			    be64_to_cpu(mlx4_en_stats->TTOT_prio_4) +
			    be64_to_cpu(mlx4_en_stats->TTOT_prio_5) +
			    be64_to_cpu(mlx4_en_stats->TTOT_prio_6) +
			    be64_to_cpu(mlx4_en_stats->TTOT_prio_7) +
			    be64_to_cpu(mlx4_en_stats->TTOT_novlan) +
			    be64_to_cpu(mlx4_en_stats->TTOT_loopbk);
	stats->rx_bytes = be64_to_cpu(mlx4_en_stats->ROCT_prio_0) +
			  be64_to_cpu(mlx4_en_stats->ROCT_prio_1) +
			  be64_to_cpu(mlx4_en_stats->ROCT_prio_2) +
			  be64_to_cpu(mlx4_en_stats->ROCT_prio_3) +
			  be64_to_cpu(mlx4_en_stats->ROCT_prio_4) +
			  be64_to_cpu(mlx4_en_stats->ROCT_prio_5) +
			  be64_to_cpu(mlx4_en_stats->ROCT_prio_6) +
			  be64_to_cpu(mlx4_en_stats->ROCT_prio_7) +
			  be64_to_cpu(mlx4_en_stats->ROCT_novlan);

	stats->tx_bytes = be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_0) +
			  be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_1) +
			  be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_2) +
			  be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_3) +
			  be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_4) +
			  be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_5) +
			  be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_6) +
			  be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_7) +
			  be64_to_cpu(mlx4_en_stats->TTTLOCT_novlan) +
			  be64_to_cpu(mlx4_en_stats->TTTLOCT_loopbk);

	/* RX errors: PCS errors, length drops, jabbers, CRC errors, runts. */
	stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) +
			   be32_to_cpu(mlx4_en_stats->RdropLength) +
			   be32_to_cpu(mlx4_en_stats->RJBBR) +
			   be32_to_cpu(mlx4_en_stats->RCRC) +
			   be32_to_cpu(mlx4_en_stats->RRUNT);
	stats->tx_errors = be32_to_cpu(mlx4_en_stats->TDROP);
	stats->multicast = be64_to_cpu(mlx4_en_stats->MCAST_prio_0) +
			   be64_to_cpu(mlx4_en_stats->MCAST_prio_1) +
			   be64_to_cpu(mlx4_en_stats->MCAST_prio_2) +
			   be64_to_cpu(mlx4_en_stats->MCAST_prio_3) +
			   be64_to_cpu(mlx4_en_stats->MCAST_prio_4) +
			   be64_to_cpu(mlx4_en_stats->MCAST_prio_5) +
			   be64_to_cpu(mlx4_en_stats->MCAST_prio_6) +
			   be64_to_cpu(mlx4_en_stats->MCAST_prio_7) +
			   be64_to_cpu(mlx4_en_stats->MCAST_novlan);
	/* Counters the hardware does not provide are reported as zero. */
	stats->collisions = 0;
	stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
	stats->rx_over_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
	stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
	stats->rx_frame_errors = 0;
	stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
	stats->rx_missed_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
	stats->tx_aborted_errors = 0;
	stats->tx_carrier_errors = 0;
	stats->tx_fifo_errors = 0;
	stats->tx_heartbeat_errors = 0;
	stats->tx_window_errors = 0;

	/* Per-priority software statistics exposed through ethtool. */
	priv->pkstats.broadcast =
				be64_to_cpu(mlx4_en_stats->RBCAST_prio_0) +
				be64_to_cpu(mlx4_en_stats->RBCAST_prio_1) +
				be64_to_cpu(mlx4_en_stats->RBCAST_prio_2) +
				be64_to_cpu(mlx4_en_stats->RBCAST_prio_3) +
				be64_to_cpu(mlx4_en_stats->RBCAST_prio_4) +
				be64_to_cpu(mlx4_en_stats->RBCAST_prio_5) +
				be64_to_cpu(mlx4_en_stats->RBCAST_prio_6) +
				be64_to_cpu(mlx4_en_stats->RBCAST_prio_7) +
				be64_to_cpu(mlx4_en_stats->RBCAST_novlan);
	priv->pkstats.rx_prio[0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_0);
	priv->pkstats.rx_prio[1] = be64_to_cpu(mlx4_en_stats->RTOT_prio_1);
	priv->pkstats.rx_prio[2] = be64_to_cpu(mlx4_en_stats->RTOT_prio_2);
	priv->pkstats.rx_prio[3] = be64_to_cpu(mlx4_en_stats->RTOT_prio_3);
	priv->pkstats.rx_prio[4] = be64_to_cpu(mlx4_en_stats->RTOT_prio_4);
	priv->pkstats.rx_prio[5] = be64_to_cpu(mlx4_en_stats->RTOT_prio_5);
	priv->pkstats.rx_prio[6] = be64_to_cpu(mlx4_en_stats->RTOT_prio_6);
	priv->pkstats.rx_prio[7] = be64_to_cpu(mlx4_en_stats->RTOT_prio_7);
	priv->pkstats.tx_prio[0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_0);
	priv->pkstats.tx_prio[1] = be64_to_cpu(mlx4_en_stats->TTOT_prio_1);
	priv->pkstats.tx_prio[2] = be64_to_cpu(mlx4_en_stats->TTOT_prio_2);
	priv->pkstats.tx_prio[3] = be64_to_cpu(mlx4_en_stats->TTOT_prio_3);
	priv->pkstats.tx_prio[4] = be64_to_cpu(mlx4_en_stats->TTOT_prio_4);
	priv->pkstats.tx_prio[5] = be64_to_cpu(mlx4_en_stats->TTOT_prio_5);
	priv->pkstats.tx_prio[6] = be64_to_cpu(mlx4_en_stats->TTOT_prio_6);
	priv->pkstats.tx_prio[7] = be64_to_cpu(mlx4_en_stats->TTOT_prio_7);
	spin_unlock_bh(&priv->stats_lock);

out:
	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
	return err;
}
261
diff --git a/drivers/net/mlx4/en_port.h b/drivers/net/mlx4/en_port.h
new file mode 100644
index 000000000000..e6477f12beb5
--- /dev/null
+++ b/drivers/net/mlx4/en_port.h
@@ -0,0 +1,570 @@
1/*
2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#ifndef _MLX4_EN_PORT_H_
35#define _MLX4_EN_PORT_H_
36
37
/* Flags value marking the general-context fields as valid
 * (used by mlx4_SET_PORT_general()). */
#define SET_PORT_GEN_ALL_VALID	0x7
/* Bit position of the promiscuous-enable flag in the RQP calc context. */
#define SET_PORT_PROMISC_SHIFT	31

/* Ethernet-specific firmware command opcodes. */
enum {
	MLX4_CMD_SET_VLAN_FLTR  = 0x47,
	MLX4_CMD_SET_MCAST_FLTR = 0x48,
	MLX4_CMD_DUMP_ETH_STATS = 0x49,
};
46
/* Mailbox layout of the SET_PORT "general" context as consumed by
 * firmware; multi-byte fields are big-endian.  Field order and sizes
 * are ABI-fixed — do not reorder. */
struct mlx4_set_port_general_context {
	u8 reserved[3];
	u8 flags;	/* SET_PORT_GEN_ALL_VALID */
	u16 reserved2;
	__be16 mtu;
	u8 pptx;	/* global TX pause enable in bit 7 */
	u8 pfctx;	/* per-priority (PFC) TX pause bitmask */
	u16 reserved3;
	u8 pprx;	/* global RX pause enable in bit 7 */
	u8 pfcrx;	/* per-priority (PFC) RX pause bitmask */
	u16 reserved4;
};
59
/* Mailbox layout of the SET_PORT RQP-calculation (RX steering) context;
 * multi-byte fields are big-endian.  Field order and sizes are
 * ABI-fixed — do not reorder. */
struct mlx4_set_port_rqp_calc_context {
	__be32 base_qpn;	/* first QP number of the RX range */
	__be32 flags;
	u8 reserved[3];
	u8 mac_miss;
	u8 intra_no_vlan;
	u8 no_vlan;		/* QP index for untagged frames */
	u8 intra_vlan_miss;
	u8 vlan_miss;		/* QP index for vlan-table misses */
	u8 reserved2[3];
	u8 no_vlan_prio;
	__be32 promisc;		/* promisc flag (bit 31) | base QPN */
	__be32 mcast;		/* mcast enable (bit 31) | base QPN */
};
74
/* 128 32-bit words = one filter bit for each of the 4096 vlan ids. */
#define VLAN_FLTR_SIZE	128
struct mlx4_set_vlan_fltr_mbox {
	__be32 entry[VLAN_FLTR_SIZE];
};
79
80
/* op modifiers for MLX4_CMD_SET_MCAST_FLTR
 * (passed as 'mode' to mlx4_SET_MCAST_FLTR()). */
enum {
	MLX4_MCAST_CONFIG       = 0,
	MLX4_MCAST_DISABLE      = 1,
	MLX4_MCAST_ENABLE       = 2,
};
86
87
88struct mlx4_en_stat_out_mbox {
89 /* Received frames with a length of 64 octets */
90 __be64 R64_prio_0;
91 __be64 R64_prio_1;
92 __be64 R64_prio_2;
93 __be64 R64_prio_3;
94 __be64 R64_prio_4;
95 __be64 R64_prio_5;
96 __be64 R64_prio_6;
97 __be64 R64_prio_7;
98 __be64 R64_novlan;
99 /* Received frames with a length of 127 octets */
100 __be64 R127_prio_0;
101 __be64 R127_prio_1;
102 __be64 R127_prio_2;
103 __be64 R127_prio_3;
104 __be64 R127_prio_4;
105 __be64 R127_prio_5;
106 __be64 R127_prio_6;
107 __be64 R127_prio_7;
108 __be64 R127_novlan;
109 /* Received frames with a length of 255 octets */
110 __be64 R255_prio_0;
111 __be64 R255_prio_1;
112 __be64 R255_prio_2;
113 __be64 R255_prio_3;
114 __be64 R255_prio_4;
115 __be64 R255_prio_5;
116 __be64 R255_prio_6;
117 __be64 R255_prio_7;
118 __be64 R255_novlan;
119 /* Received frames with a length of 511 octets */
120 __be64 R511_prio_0;
121 __be64 R511_prio_1;
122 __be64 R511_prio_2;
123 __be64 R511_prio_3;
124 __be64 R511_prio_4;
125 __be64 R511_prio_5;
126 __be64 R511_prio_6;
127 __be64 R511_prio_7;
128 __be64 R511_novlan;
129 /* Received frames with a length of 1023 octets */
130 __be64 R1023_prio_0;
131 __be64 R1023_prio_1;
132 __be64 R1023_prio_2;
133 __be64 R1023_prio_3;
134 __be64 R1023_prio_4;
135 __be64 R1023_prio_5;
136 __be64 R1023_prio_6;
137 __be64 R1023_prio_7;
138 __be64 R1023_novlan;
139 /* Received frames with a length of 1518 octets */
140 __be64 R1518_prio_0;
141 __be64 R1518_prio_1;
142 __be64 R1518_prio_2;
143 __be64 R1518_prio_3;
144 __be64 R1518_prio_4;
145 __be64 R1518_prio_5;
146 __be64 R1518_prio_6;
147 __be64 R1518_prio_7;
148 __be64 R1518_novlan;
149 /* Received frames with a length of 1522 octets */
150 __be64 R1522_prio_0;
151 __be64 R1522_prio_1;
152 __be64 R1522_prio_2;
153 __be64 R1522_prio_3;
154 __be64 R1522_prio_4;
155 __be64 R1522_prio_5;
156 __be64 R1522_prio_6;
157 __be64 R1522_prio_7;
158 __be64 R1522_novlan;
159 /* Received frames with a length of 1548 octets */
160 __be64 R1548_prio_0;
161 __be64 R1548_prio_1;
162 __be64 R1548_prio_2;
163 __be64 R1548_prio_3;
164 __be64 R1548_prio_4;
165 __be64 R1548_prio_5;
166 __be64 R1548_prio_6;
167 __be64 R1548_prio_7;
168 __be64 R1548_novlan;
169 /* Received frames with a length of 1548 < octets < MTU */
170 __be64 R2MTU_prio_0;
171 __be64 R2MTU_prio_1;
172 __be64 R2MTU_prio_2;
173 __be64 R2MTU_prio_3;
174 __be64 R2MTU_prio_4;
175 __be64 R2MTU_prio_5;
176 __be64 R2MTU_prio_6;
177 __be64 R2MTU_prio_7;
178 __be64 R2MTU_novlan;
179 /* Received frames with a length of MTU< octets and good CRC */
180 __be64 RGIANT_prio_0;
181 __be64 RGIANT_prio_1;
182 __be64 RGIANT_prio_2;
183 __be64 RGIANT_prio_3;
184 __be64 RGIANT_prio_4;
185 __be64 RGIANT_prio_5;
186 __be64 RGIANT_prio_6;
187 __be64 RGIANT_prio_7;
188 __be64 RGIANT_novlan;
189 /* Received broadcast frames with good CRC */
190 __be64 RBCAST_prio_0;
191 __be64 RBCAST_prio_1;
192 __be64 RBCAST_prio_2;
193 __be64 RBCAST_prio_3;
194 __be64 RBCAST_prio_4;
195 __be64 RBCAST_prio_5;
196 __be64 RBCAST_prio_6;
197 __be64 RBCAST_prio_7;
198 __be64 RBCAST_novlan;
199 /* Received multicast frames with good CRC */
200 __be64 MCAST_prio_0;
201 __be64 MCAST_prio_1;
202 __be64 MCAST_prio_2;
203 __be64 MCAST_prio_3;
204 __be64 MCAST_prio_4;
205 __be64 MCAST_prio_5;
206 __be64 MCAST_prio_6;
207 __be64 MCAST_prio_7;
208 __be64 MCAST_novlan;
209 /* Received unicast not short or GIANT frames with good CRC */
210 __be64 RTOTG_prio_0;
211 __be64 RTOTG_prio_1;
212 __be64 RTOTG_prio_2;
213 __be64 RTOTG_prio_3;
214 __be64 RTOTG_prio_4;
215 __be64 RTOTG_prio_5;
216 __be64 RTOTG_prio_6;
217 __be64 RTOTG_prio_7;
218 __be64 RTOTG_novlan;
219
220 /* Count of total octets of received frames, includes framing characters */
221 __be64 RTTLOCT_prio_0;
222 /* Count of total octets of received frames, not including framing
223 characters */
224 __be64 RTTLOCT_NOFRM_prio_0;
225 /* Count of Total number of octets received
226 (only for frames without errors) */
227 __be64 ROCT_prio_0;
228
229 __be64 RTTLOCT_prio_1;
230 __be64 RTTLOCT_NOFRM_prio_1;
231 __be64 ROCT_prio_1;
232
233 __be64 RTTLOCT_prio_2;
234 __be64 RTTLOCT_NOFRM_prio_2;
235 __be64 ROCT_prio_2;
236
237 __be64 RTTLOCT_prio_3;
238 __be64 RTTLOCT_NOFRM_prio_3;
239 __be64 ROCT_prio_3;
240
241 __be64 RTTLOCT_prio_4;
242 __be64 RTTLOCT_NOFRM_prio_4;
243 __be64 ROCT_prio_4;
244
245 __be64 RTTLOCT_prio_5;
246 __be64 RTTLOCT_NOFRM_prio_5;
247 __be64 ROCT_prio_5;
248
249 __be64 RTTLOCT_prio_6;
250 __be64 RTTLOCT_NOFRM_prio_6;
251 __be64 ROCT_prio_6;
252
253 __be64 RTTLOCT_prio_7;
254 __be64 RTTLOCT_NOFRM_prio_7;
255 __be64 ROCT_prio_7;
256
257 __be64 RTTLOCT_novlan;
258 __be64 RTTLOCT_NOFRM_novlan;
259 __be64 ROCT_novlan;
260
261 /* Count of Total received frames including bad frames */
262 __be64 RTOT_prio_0;
263 /* Count of Total number of received frames with 802.1Q encapsulation */
264 __be64 R1Q_prio_0;
265 __be64 reserved1;
266
267 __be64 RTOT_prio_1;
268 __be64 R1Q_prio_1;
269 __be64 reserved2;
270
271 __be64 RTOT_prio_2;
272 __be64 R1Q_prio_2;
273 __be64 reserved3;
274
275 __be64 RTOT_prio_3;
276 __be64 R1Q_prio_3;
277 __be64 reserved4;
278
279 __be64 RTOT_prio_4;
280 __be64 R1Q_prio_4;
281 __be64 reserved5;
282
283 __be64 RTOT_prio_5;
284 __be64 R1Q_prio_5;
285 __be64 reserved6;
286
287 __be64 RTOT_prio_6;
288 __be64 R1Q_prio_6;
289 __be64 reserved7;
290
291 __be64 RTOT_prio_7;
292 __be64 R1Q_prio_7;
293 __be64 reserved8;
294
295 __be64 RTOT_novlan;
296 __be64 R1Q_novlan;
297 __be64 reserved9;
298
299 /* Total number of Successfully Received Control Frames */
300 __be64 RCNTL;
301 __be64 reserved10;
302 __be64 reserved11;
303 __be64 reserved12;
304 /* Count of received frames with a length/type field value between 46
305 (42 for VLANtagged frames) and 1500 (also 1500 for VLAN-tagged frames),
306 inclusive */
307 __be64 RInRangeLengthErr;
308 /* Count of received frames with length/type field between 1501 and 1535
309 decimal, inclusive */
310 __be64 ROutRangeLengthErr;
311 /* Count of received frames that are longer than max allowed size for
312 802.3 frames (1518/1522) */
313 __be64 RFrmTooLong;
314 /* Count frames received with PCS error */
315 __be64 PCS;
316
317 /* Transmit frames with a length of 64 octets */
318 __be64 T64_prio_0;
319 __be64 T64_prio_1;
320 __be64 T64_prio_2;
321 __be64 T64_prio_3;
322 __be64 T64_prio_4;
323 __be64 T64_prio_5;
324 __be64 T64_prio_6;
325 __be64 T64_prio_7;
326 __be64 T64_novlan;
327 __be64 T64_loopbk;
328 /* Transmit frames with a length of 65 to 127 octets. */
329 __be64 T127_prio_0;
330 __be64 T127_prio_1;
331 __be64 T127_prio_2;
332 __be64 T127_prio_3;
333 __be64 T127_prio_4;
334 __be64 T127_prio_5;
335 __be64 T127_prio_6;
336 __be64 T127_prio_7;
337 __be64 T127_novlan;
338 __be64 T127_loopbk;
339 /* Transmit frames with a length of 128 to 255 octets */
340 __be64 T255_prio_0;
341 __be64 T255_prio_1;
342 __be64 T255_prio_2;
343 __be64 T255_prio_3;
344 __be64 T255_prio_4;
345 __be64 T255_prio_5;
346 __be64 T255_prio_6;
347 __be64 T255_prio_7;
348 __be64 T255_novlan;
349 __be64 T255_loopbk;
350 /* Transmit frames with a length of 256 to 511 octets */
351 __be64 T511_prio_0;
352 __be64 T511_prio_1;
353 __be64 T511_prio_2;
354 __be64 T511_prio_3;
355 __be64 T511_prio_4;
356 __be64 T511_prio_5;
357 __be64 T511_prio_6;
358 __be64 T511_prio_7;
359 __be64 T511_novlan;
360 __be64 T511_loopbk;
361 /* Transmit frames with a length of 512 to 1023 octets */
362 __be64 T1023_prio_0;
363 __be64 T1023_prio_1;
364 __be64 T1023_prio_2;
365 __be64 T1023_prio_3;
366 __be64 T1023_prio_4;
367 __be64 T1023_prio_5;
368 __be64 T1023_prio_6;
369 __be64 T1023_prio_7;
370 __be64 T1023_novlan;
371 __be64 T1023_loopbk;
372 /* Transmit frames with a length of 1024 to 1518 octets */
373 __be64 T1518_prio_0;
374 __be64 T1518_prio_1;
375 __be64 T1518_prio_2;
376 __be64 T1518_prio_3;
377 __be64 T1518_prio_4;
378 __be64 T1518_prio_5;
379 __be64 T1518_prio_6;
380 __be64 T1518_prio_7;
381 __be64 T1518_novlan;
382 __be64 T1518_loopbk;
383 /* Counts transmit frames with a length of 1519 to 1522 bytes */
384 __be64 T1522_prio_0;
385 __be64 T1522_prio_1;
386 __be64 T1522_prio_2;
387 __be64 T1522_prio_3;
388 __be64 T1522_prio_4;
389 __be64 T1522_prio_5;
390 __be64 T1522_prio_6;
391 __be64 T1522_prio_7;
392 __be64 T1522_novlan;
393 __be64 T1522_loopbk;
394 /* Transmit frames with a length of 1523 to 1548 octets */
395 __be64 T1548_prio_0;
396 __be64 T1548_prio_1;
397 __be64 T1548_prio_2;
398 __be64 T1548_prio_3;
399 __be64 T1548_prio_4;
400 __be64 T1548_prio_5;
401 __be64 T1548_prio_6;
402 __be64 T1548_prio_7;
403 __be64 T1548_novlan;
404 __be64 T1548_loopbk;
405 /* Counts transmit frames with a length of 1549 to MTU bytes */
406 __be64 T2MTU_prio_0;
407 __be64 T2MTU_prio_1;
408 __be64 T2MTU_prio_2;
409 __be64 T2MTU_prio_3;
410 __be64 T2MTU_prio_4;
411 __be64 T2MTU_prio_5;
412 __be64 T2MTU_prio_6;
413 __be64 T2MTU_prio_7;
414 __be64 T2MTU_novlan;
415 __be64 T2MTU_loopbk;
416 /* Transmit frames with a length greater than MTU octets and a good CRC. */
417 __be64 TGIANT_prio_0;
418 __be64 TGIANT_prio_1;
419 __be64 TGIANT_prio_2;
420 __be64 TGIANT_prio_3;
421 __be64 TGIANT_prio_4;
422 __be64 TGIANT_prio_5;
423 __be64 TGIANT_prio_6;
424 __be64 TGIANT_prio_7;
425 __be64 TGIANT_novlan;
426 __be64 TGIANT_loopbk;
427 /* Transmit broadcast frames with a good CRC */
428 __be64 TBCAST_prio_0;
429 __be64 TBCAST_prio_1;
430 __be64 TBCAST_prio_2;
431 __be64 TBCAST_prio_3;
432 __be64 TBCAST_prio_4;
433 __be64 TBCAST_prio_5;
434 __be64 TBCAST_prio_6;
435 __be64 TBCAST_prio_7;
436 __be64 TBCAST_novlan;
437 __be64 TBCAST_loopbk;
438 /* Transmit multicast frames with a good CRC */
439 __be64 TMCAST_prio_0;
440 __be64 TMCAST_prio_1;
441 __be64 TMCAST_prio_2;
442 __be64 TMCAST_prio_3;
443 __be64 TMCAST_prio_4;
444 __be64 TMCAST_prio_5;
445 __be64 TMCAST_prio_6;
446 __be64 TMCAST_prio_7;
447 __be64 TMCAST_novlan;
448 __be64 TMCAST_loopbk;
449 /* Transmit good frames that are neither broadcast nor multicast */
450 __be64 TTOTG_prio_0;
451 __be64 TTOTG_prio_1;
452 __be64 TTOTG_prio_2;
453 __be64 TTOTG_prio_3;
454 __be64 TTOTG_prio_4;
455 __be64 TTOTG_prio_5;
456 __be64 TTOTG_prio_6;
457 __be64 TTOTG_prio_7;
458 __be64 TTOTG_novlan;
459 __be64 TTOTG_loopbk;
460
461 /* total octets of transmitted frames, including framing characters */
462 __be64 TTTLOCT_prio_0;
463 /* total octets of transmitted frames, not including framing characters */
464 __be64 TTTLOCT_NOFRM_prio_0;
465 /* ifOutOctets */
466 __be64 TOCT_prio_0;
467
468 __be64 TTTLOCT_prio_1;
469 __be64 TTTLOCT_NOFRM_prio_1;
470 __be64 TOCT_prio_1;
471
472 __be64 TTTLOCT_prio_2;
473 __be64 TTTLOCT_NOFRM_prio_2;
474 __be64 TOCT_prio_2;
475
476 __be64 TTTLOCT_prio_3;
477 __be64 TTTLOCT_NOFRM_prio_3;
478 __be64 TOCT_prio_3;
479
480 __be64 TTTLOCT_prio_4;
481 __be64 TTTLOCT_NOFRM_prio_4;
482 __be64 TOCT_prio_4;
483
484 __be64 TTTLOCT_prio_5;
485 __be64 TTTLOCT_NOFRM_prio_5;
486 __be64 TOCT_prio_5;
487
488 __be64 TTTLOCT_prio_6;
489 __be64 TTTLOCT_NOFRM_prio_6;
490 __be64 TOCT_prio_6;
491
492 __be64 TTTLOCT_prio_7;
493 __be64 TTTLOCT_NOFRM_prio_7;
494 __be64 TOCT_prio_7;
495
496 __be64 TTTLOCT_novlan;
497 __be64 TTTLOCT_NOFRM_novlan;
498 __be64 TOCT_novlan;
499
500 __be64 TTTLOCT_loopbk;
501 __be64 TTTLOCT_NOFRM_loopbk;
502 __be64 TOCT_loopbk;
503
504 /* Total frames transmitted with a good CRC that are not aborted */
505 __be64 TTOT_prio_0;
506 /* Total number of frames transmitted with 802.1Q encapsulation */
507 __be64 T1Q_prio_0;
508 __be64 reserved13;
509
510 __be64 TTOT_prio_1;
511 __be64 T1Q_prio_1;
512 __be64 reserved14;
513
514 __be64 TTOT_prio_2;
515 __be64 T1Q_prio_2;
516 __be64 reserved15;
517
518 __be64 TTOT_prio_3;
519 __be64 T1Q_prio_3;
520 __be64 reserved16;
521
522 __be64 TTOT_prio_4;
523 __be64 T1Q_prio_4;
524 __be64 reserved17;
525
526 __be64 TTOT_prio_5;
527 __be64 T1Q_prio_5;
528 __be64 reserved18;
529
530 __be64 TTOT_prio_6;
531 __be64 T1Q_prio_6;
532 __be64 reserved19;
533
534 __be64 TTOT_prio_7;
535 __be64 T1Q_prio_7;
536 __be64 reserved20;
537
538 __be64 TTOT_novlan;
539 __be64 T1Q_novlan;
540 __be64 reserved21;
541
542 __be64 TTOT_loopbk;
543 __be64 T1Q_loopbk;
544 __be64 reserved22;
545
546 /* Received frames with a length greater than MTU octets and a bad CRC */
547 __be32 RJBBR;
548 /* Received frames with a bad CRC that are not runts, jabbers,
549 or alignment errors */
550 __be32 RCRC;
551 /* Received frames with SFD with a length of less than 64 octets and a
552 bad CRC */
553 __be32 RRUNT;
554 /* Received frames with a length less than 64 octets and a good CRC */
555 __be32 RSHORT;
556 /* Total Number of Received Packets Dropped */
557 __be32 RDROP;
558 /* Drop due to overflow */
559 __be32 RdropOvflw;
560 /* Drop due to overflow */
561 __be32 RdropLength;
562 /* Total of good frames. Does not include frames received with
563 frame-too-long, FCS, or length errors */
564 __be32 RTOTFRMS;
565 /* Total dropped Xmited packets */
566 __be32 TDROP;
567};
568
569
570#endif
diff --git a/drivers/net/mlx4/en_resources.c b/drivers/net/mlx4/en_resources.c
new file mode 100644
index 000000000000..a0545209e507
--- /dev/null
+++ b/drivers/net/mlx4/en_resources.c
@@ -0,0 +1,96 @@
1/*
2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#include <linux/vmalloc.h>
35#include <linux/mlx4/qp.h>
36
37#include "mlx4_en.h"
38
/* Populate a QP context for an ethernet send or receive queue.
 *
 * @size/@stride: number of WQEs and WQE byte size (log2-encoded into
 *                the context's size/stride fields)
 * @is_tx:        build a TX (send-queue) context rather than RX
 * @rss:          queue participates in RSS (folded into flags bit 13)
 * @qpn/@cqn/@srqn: QP, CQ and (for non-RSS RX) SRQ numbers
 */
void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
			     int is_tx, int rss, int qpn, int cqn, int srqn,
			     struct mlx4_qp_context *context)
{
	struct mlx4_en_dev *mdev = priv->mdev;

	memset(context, 0, sizeof *context);
	context->flags = cpu_to_be32(7 << 16 | rss << 13);
	context->pd = cpu_to_be32(mdev->priv_pdn);
	context->mtu_msgmax = 0xff;
	context->rq_size_stride = 0;
	if (is_tx)
		/* log2(size) in bits 7:3, log2(stride)-4 in bits 2:0 */
		context->sq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4);
	else
		context->sq_size_stride = 1;
	context->usr_page = cpu_to_be32(mdev->priv_uar.index);
	context->local_qpn = cpu_to_be32(qpn);
	context->pri_path.ackto = 1 & 0x07;
	/* schedule queue selected by port number (priv->port is 1-based) */
	context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6;
	context->pri_path.counter_index = 0xff;
	context->cqn_send = cpu_to_be32(cqn);
	context->cqn_recv = cpu_to_be32(cqn);
	context->db_rec_addr = cpu_to_be64(priv->res.db.dma << 2);
	if (!rss)
		/* non-RSS RX QPs take their buffers from an SRQ */
		context->srqn = cpu_to_be32(MLX4_EN_USE_SRQ | srqn);
}
65
66
67int mlx4_en_map_buffer(struct mlx4_buf *buf)
68{
69 struct page **pages;
70 int i;
71
72 if (BITS_PER_LONG == 64 || buf->nbufs == 1)
73 return 0;
74
75 pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL);
76 if (!pages)
77 return -ENOMEM;
78
79 for (i = 0; i < buf->nbufs; ++i)
80 pages[i] = virt_to_page(buf->page_list[i].buf);
81
82 buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
83 kfree(pages);
84 if (!buf->direct.buf)
85 return -ENOMEM;
86
87 return 0;
88}
89
90void mlx4_en_unmap_buffer(struct mlx4_buf *buf)
91{
92 if (BITS_PER_LONG == 64 || buf->nbufs == 1)
93 return;
94
95 vunmap(buf->direct.buf);
96}
diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
new file mode 100644
index 000000000000..6232227f56c3
--- /dev/null
+++ b/drivers/net/mlx4/en_rx.c
@@ -0,0 +1,1080 @@
1/*
2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#include <linux/mlx4/cq.h>
35#include <linux/mlx4/qp.h>
36#include <linux/skbuff.h>
37#include <linux/if_ether.h>
38#include <linux/if_vlan.h>
39#include <linux/vmalloc.h>
40
41#include "mlx4_en.h"
42
43static void *get_wqe(struct mlx4_en_rx_ring *ring, int n)
44{
45 int offset = n << ring->srq.wqe_shift;
46 return ring->buf + offset;
47}
48
49static void mlx4_en_srq_event(struct mlx4_srq *srq, enum mlx4_event type)
50{
51 return;
52}
53
/*
 * inet_lro callback: locate the MAC, IP and TCP headers of a received
 * page fragment so the LRO engine can aggregate it.
 *
 * Uses fixed offsets (ETH_HLEN, sizeof(struct iphdr)), i.e. assumes DIX
 * Ethernet framing and an IPv4 header without options - the RX path
 * only hands LRO-eligible packets to this callback.
 */
static int mlx4_en_get_frag_header(struct skb_frag_struct *frags, void **mac_hdr,
				   void **ip_hdr, void **tcpudp_hdr,
				   u64 *hdr_flags, void *priv)
{
	*mac_hdr = page_address(frags->page) + frags->page_offset;
	*ip_hdr = *mac_hdr + ETH_HLEN;
	*tcpudp_hdr = (struct tcphdr *)(*ip_hdr + sizeof(struct iphdr));
	*hdr_flags = LRO_IPV4 | LRO_TCP;

	return 0;
}
65
/*
 * Allocate one RX fragment for scatter entry @i of @rx_desc, DMA-map it
 * and write its bus address into the descriptor.
 *
 * Fragments are carved as frag_stride-sized slices out of a high-order
 * allocator page.  When the slice at last_offset is handed out, a fresh
 * page is installed in the allocator and the old page's final slice
 * travels with the skb (its last reference goes with it).  For interior
 * slices the page refcount is bumped instead.
 *
 * Returns 0 on success or -ENOMEM if a fresh page could not be
 * allocated.  NOTE(review): the pci_map_single() result is not checked
 * for mapping errors - presumably acceptable on the platforms this
 * driver targets; confirm.
 */
static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv,
			      struct mlx4_en_rx_desc *rx_desc,
			      struct skb_frag_struct *skb_frags,
			      struct mlx4_en_rx_alloc *ring_alloc,
			      int i)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
	struct mlx4_en_rx_alloc *page_alloc = &ring_alloc[i];
	struct page *page;
	dma_addr_t dma;

	if (page_alloc->offset == frag_info->last_offset) {
		/* Allocate new page */
		page = alloc_pages(GFP_ATOMIC | __GFP_COMP, MLX4_EN_ALLOC_ORDER);
		if (!page)
			return -ENOMEM;

		/* Hand the exhausted page's last slice to the skb and
		 * install the fresh page in the allocator */
		skb_frags[i].page = page_alloc->page;
		skb_frags[i].page_offset = page_alloc->offset;
		page_alloc->page = page;
		page_alloc->offset = frag_info->frag_align;
	} else {
		page = page_alloc->page;
		/* Extra reference: the skb and the allocator now share it */
		get_page(page);

		skb_frags[i].page = page;
		skb_frags[i].page_offset = page_alloc->offset;
		page_alloc->offset += frag_info->frag_stride;
	}
	dma = pci_map_single(mdev->pdev, page_address(skb_frags[i].page) +
			     skb_frags[i].page_offset, frag_info->frag_size,
			     PCI_DMA_FROMDEVICE);
	rx_desc->data[i].addr = cpu_to_be64(dma);
	return 0;
}
102
103static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
104 struct mlx4_en_rx_ring *ring)
105{
106 struct mlx4_en_rx_alloc *page_alloc;
107 int i;
108
109 for (i = 0; i < priv->num_frags; i++) {
110 page_alloc = &ring->page_alloc[i];
111 page_alloc->page = alloc_pages(GFP_ATOMIC | __GFP_COMP,
112 MLX4_EN_ALLOC_ORDER);
113 if (!page_alloc->page)
114 goto out;
115
116 page_alloc->offset = priv->frag_info[i].frag_align;
117 mlx4_dbg(DRV, priv, "Initialized allocator:%d with page:%p\n",
118 i, page_alloc->page);
119 }
120 return 0;
121
122out:
123 while (i--) {
124 page_alloc = &ring->page_alloc[i];
125 put_page(page_alloc->page);
126 page_alloc->page = NULL;
127 }
128 return -ENOMEM;
129}
130
131static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv,
132 struct mlx4_en_rx_ring *ring)
133{
134 struct mlx4_en_rx_alloc *page_alloc;
135 int i;
136
137 for (i = 0; i < priv->num_frags; i++) {
138 page_alloc = &ring->page_alloc[i];
139 mlx4_dbg(DRV, priv, "Freeing allocator:%d count:%d\n",
140 i, page_count(page_alloc->page));
141
142 put_page(page_alloc->page);
143 page_alloc->page = NULL;
144 }
145}
146
147
/*
 * One-time initialization of the RX descriptor at @index: link it to
 * the next WQE, program size/lkey of every used scatter entry and pad
 * unused entries with the special "pad" memory key so HW skips them.
 */
static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv,
				 struct mlx4_en_rx_ring *ring, int index)
{
	struct mlx4_en_rx_desc *rx_desc = ring->buf + ring->stride * index;
	struct skb_frag_struct *skb_frags = ring->rx_info +
					    (index << priv->log_rx_info);
	int possible_frags;
	int i;

	/* Pre-link descriptor */
	rx_desc->next.next_wqe_index = cpu_to_be16((index + 1) & ring->size_mask);

	/* Set size and memtype fields */
	for (i = 0; i < priv->num_frags; i++) {
		skb_frags[i].size = priv->frag_info[i].frag_size;
		rx_desc->data[i].byte_count =
			cpu_to_be32(priv->frag_info[i].frag_size);
		rx_desc->data[i].lkey = cpu_to_be32(priv->mdev->mr.key);
	}

	/* If the number of used fragments does not fill up the ring stride,
	 * remaining (unused) fragments must be padded with null address/size
	 * and a special memory key */
	possible_frags = (ring->stride - sizeof(struct mlx4_en_rx_desc)) / DS_SIZE;
	for (i = priv->num_frags; i < possible_frags; i++) {
		rx_desc->data[i].byte_count = 0;
		rx_desc->data[i].lkey = cpu_to_be32(MLX4_EN_MEMTYPE_PAD);
		rx_desc->data[i].addr = 0;
	}
}
178
179
/*
 * Populate every fragment of the RX descriptor at @index with a freshly
 * allocated, DMA-mapped buffer.
 *
 * Returns 0 on success, -ENOMEM if any fragment allocation fails (the
 * fragments allocated so far are released again).
 *
 * NOTE(review): the error path drops the page references but does not
 * pci_unmap_single() fragments that were already mapped - verify
 * whether an unmap is required here.
 */
static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
				   struct mlx4_en_rx_ring *ring, int index)
{
	struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride);
	struct skb_frag_struct *skb_frags = ring->rx_info +
					    (index << priv->log_rx_info);
	int i;

	for (i = 0; i < priv->num_frags; i++)
		if (mlx4_en_alloc_frag(priv, rx_desc, skb_frags, ring->page_alloc, i))
			goto err;

	return 0;

err:
	while (i--)
		put_page(skb_frags[i].page);
	return -ENOMEM;
}
199
/* Publish the current producer index to the doorbell record read by HW
 * (only the low 16 bits are significant). */
static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
{
	*ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
}
204
/*
 * Initial fill of all RX rings.  Buffers are posted one per ring per
 * pass, so under memory pressure all rings degrade evenly rather than
 * the last ring starving.
 *
 * Returns 0 if every ring got at least MLX4_EN_MIN_RX_SIZE buffers
 * (rings may remain partially filled), -ENOMEM otherwise.
 */
static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rx_ring *ring;
	int ring_ind;
	int buf_ind;

	for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) {
		for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
			ring = &priv->rx_ring[ring_ind];

			if (mlx4_en_prepare_rx_desc(priv, ring,
						    ring->actual_size)) {
				if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
					mlx4_err(mdev, "Failed to allocate "
						       "enough rx buffers\n");
					return -ENOMEM;
				} else {
					/* Usable although not full */
					if (netif_msg_rx_err(priv))
						mlx4_warn(mdev,
							  "Only %d buffers allocated\n",
							  ring->actual_size);
					goto out;
				}
			}
			ring->actual_size++;
			ring->prod++;
		}
	}
out:
	return 0;
}
237
/*
 * Repost descriptors on @ring until it again holds actual_size buffers
 * (or an allocation fails).  Marks the ring full when the producer
 * catches up to cons + size.
 *
 * Returns the number of descriptors successfully added.
 */
static int mlx4_en_fill_rx_buf(struct net_device *dev,
			       struct mlx4_en_rx_ring *ring)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int num = 0;
	int err;

	while ((u32) (ring->prod - ring->cons) < ring->actual_size) {
		err = mlx4_en_prepare_rx_desc(priv, ring, ring->prod &
					      ring->size_mask);
		if (err) {
			if (netif_msg_rx_err(priv))
				mlx4_warn(priv->mdev,
					  "Failed preparing rx descriptor\n");
			priv->port_stats.rx_alloc_failed++;
			break;
		}
		++num;
		++ring->prod;
	}
	if ((u32) (ring->prod - ring->cons) == ring->size)
		ring->full = 1;

	return num;
}
263
/*
 * Unmap and release every buffer still posted on @ring (all
 * descriptors between cons and prod).  Only called on teardown paths,
 * when no HW DMA into the ring can be in flight.
 */
static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
				struct mlx4_en_rx_ring *ring)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct skb_frag_struct *skb_frags;
	struct mlx4_en_rx_desc *rx_desc;
	dma_addr_t dma;
	int index;
	int nr;

	mlx4_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n",
			ring->cons, ring->prod);

	/* Unmap and free Rx buffers */
	BUG_ON((u32) (ring->prod - ring->cons) > ring->size);
	while (ring->cons != ring->prod) {
		index = ring->cons & ring->size_mask;
		rx_desc = ring->buf + (index << ring->log_stride);
		skb_frags = ring->rx_info + (index << priv->log_rx_info);
		mlx4_dbg(DRV, priv, "Processing descriptor:%d\n", index);

		for (nr = 0; nr < priv->num_frags; nr++) {
			mlx4_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
			/* DMA address lives in the descriptor itself */
			dma = be64_to_cpu(rx_desc->data[nr].addr);

			mlx4_dbg(DRV, priv, "Unmaping buffer at dma:0x%llx\n", (u64) dma);
			pci_unmap_single(mdev->pdev, dma, skb_frags[nr].size,
					 PCI_DMA_FROMDEVICE);
			put_page(skb_frags[nr].page);
		}
		++ring->cons;
	}
}
297
298
/*
 * Delayed-work handler: retry refilling RX rings after an
 * out-of-buffers condition.  Reschedules itself one HZ later while any
 * ring still could not be refilled.  Serialized against port up/down
 * by mdev->state_lock.
 */
void mlx4_en_rx_refill(struct work_struct *work)
{
	struct delayed_work *delay = container_of(work, struct delayed_work, work);
	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
						 refill_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;
	struct mlx4_en_rx_ring *ring;
	int need_refill = 0;
	int i;

	mutex_lock(&mdev->state_lock);
	if (!mdev->device_up || !priv->port_up)
		goto out;

	/* We only get here if there are no receive buffers, so we can't race
	 * with Rx interrupts while filling buffers */
	for (i = 0; i < priv->rx_ring_num; i++) {
		ring = &priv->rx_ring[i];
		if (ring->need_refill) {
			if (mlx4_en_fill_rx_buf(dev, ring)) {
				ring->need_refill = 0;
				mlx4_en_update_rx_prod_db(ring);
			} else
				need_refill = 1;
		}
	}
	if (need_refill)
		queue_delayed_work(mdev->workqueue, &priv->refill_task, HZ);

out:
	mutex_unlock(&mdev->state_lock);
}
332
333
334int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
335 struct mlx4_en_rx_ring *ring, u32 size, u16 stride)
336{
337 struct mlx4_en_dev *mdev = priv->mdev;
338 int err;
339 int tmp;
340
341 /* Sanity check SRQ size before proceeding */
342 if (size >= mdev->dev->caps.max_srq_wqes)
343 return -EINVAL;
344
345 ring->prod = 0;
346 ring->cons = 0;
347 ring->size = size;
348 ring->size_mask = size - 1;
349 ring->stride = stride;
350 ring->log_stride = ffs(ring->stride) - 1;
351 ring->buf_size = ring->size * ring->stride;
352
353 tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
354 sizeof(struct skb_frag_struct));
355 ring->rx_info = vmalloc(tmp);
356 if (!ring->rx_info) {
357 mlx4_err(mdev, "Failed allocating rx_info ring\n");
358 return -ENOMEM;
359 }
360 mlx4_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n",
361 ring->rx_info, tmp);
362
363 err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres,
364 ring->buf_size, 2 * PAGE_SIZE);
365 if (err)
366 goto err_ring;
367
368 err = mlx4_en_map_buffer(&ring->wqres.buf);
369 if (err) {
370 mlx4_err(mdev, "Failed to map RX buffer\n");
371 goto err_hwq;
372 }
373 ring->buf = ring->wqres.buf.direct.buf;
374
375 /* Configure lro mngr */
376 memset(&ring->lro, 0, sizeof(struct net_lro_mgr));
377 ring->lro.dev = priv->dev;
378 ring->lro.features = LRO_F_NAPI;
379 ring->lro.frag_align_pad = NET_IP_ALIGN;
380 ring->lro.ip_summed = CHECKSUM_UNNECESSARY;
381 ring->lro.ip_summed_aggr = CHECKSUM_UNNECESSARY;
382 ring->lro.max_desc = mdev->profile.num_lro;
383 ring->lro.max_aggr = MAX_SKB_FRAGS;
384 ring->lro.lro_arr = kzalloc(mdev->profile.num_lro *
385 sizeof(struct net_lro_desc),
386 GFP_KERNEL);
387 if (!ring->lro.lro_arr) {
388 mlx4_err(mdev, "Failed to allocate lro array\n");
389 goto err_map;
390 }
391 ring->lro.get_frag_header = mlx4_en_get_frag_header;
392
393 return 0;
394
395err_map:
396 mlx4_en_unmap_buffer(&ring->wqres.buf);
397err_hwq:
398 mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
399err_ring:
400 vfree(ring->rx_info);
401 ring->rx_info = NULL;
402 return err;
403}
404
405int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
406{
407 struct mlx4_en_dev *mdev = priv->mdev;
408 struct mlx4_wqe_srq_next_seg *next;
409 struct mlx4_en_rx_ring *ring;
410 int i;
411 int ring_ind;
412 int err;
413 int stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
414 DS_SIZE * priv->num_frags);
415 int max_gs = (stride - sizeof(struct mlx4_wqe_srq_next_seg)) / DS_SIZE;
416
417 for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
418 ring = &priv->rx_ring[ring_ind];
419
420 ring->prod = 0;
421 ring->cons = 0;
422 ring->actual_size = 0;
423 ring->cqn = priv->rx_cq[ring_ind].mcq.cqn;
424
425 ring->stride = stride;
426 ring->log_stride = ffs(ring->stride) - 1;
427 ring->buf_size = ring->size * ring->stride;
428
429 memset(ring->buf, 0, ring->buf_size);
430 mlx4_en_update_rx_prod_db(ring);
431
432 /* Initailize all descriptors */
433 for (i = 0; i < ring->size; i++)
434 mlx4_en_init_rx_desc(priv, ring, i);
435
436 /* Initialize page allocators */
437 err = mlx4_en_init_allocator(priv, ring);
438 if (err) {
439 mlx4_err(mdev, "Failed initializing ring allocator\n");
440 goto err_allocator;
441 }
442
443 /* Fill Rx buffers */
444 ring->full = 0;
445 }
446 if (mlx4_en_fill_rx_buffers(priv))
447 goto err_buffers;
448
449 for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
450 ring = &priv->rx_ring[ring_ind];
451
452 mlx4_en_update_rx_prod_db(ring);
453
454 /* Configure SRQ representing the ring */
455 ring->srq.max = ring->size;
456 ring->srq.max_gs = max_gs;
457 ring->srq.wqe_shift = ilog2(ring->stride);
458
459 for (i = 0; i < ring->srq.max; ++i) {
460 next = get_wqe(ring, i);
461 next->next_wqe_index =
462 cpu_to_be16((i + 1) & (ring->srq.max - 1));
463 }
464
465 err = mlx4_srq_alloc(mdev->dev, mdev->priv_pdn, &ring->wqres.mtt,
466 ring->wqres.db.dma, &ring->srq);
467 if (err){
468 mlx4_err(mdev, "Failed to allocate srq\n");
469 goto err_srq;
470 }
471 ring->srq.event = mlx4_en_srq_event;
472 }
473
474 return 0;
475
476err_srq:
477 while (ring_ind >= 0) {
478 ring = &priv->rx_ring[ring_ind];
479 mlx4_srq_free(mdev->dev, &ring->srq);
480 ring_ind--;
481 }
482
483err_buffers:
484 for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++)
485 mlx4_en_free_rx_buf(priv, &priv->rx_ring[ring_ind]);
486
487 ring_ind = priv->rx_ring_num - 1;
488err_allocator:
489 while (ring_ind >= 0) {
490 mlx4_en_destroy_allocator(priv, &priv->rx_ring[ring_ind]);
491 ring_ind--;
492 }
493 return err;
494}
495
/*
 * Release everything allocated by mlx4_en_create_rx_ring(), in reverse
 * order of allocation.
 */
void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_rx_ring *ring)
{
	struct mlx4_en_dev *mdev = priv->mdev;

	kfree(ring->lro.lro_arr);
	mlx4_en_unmap_buffer(&ring->wqres.buf);
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
	vfree(ring->rx_info);
	ring->rx_info = NULL;
}
507
/*
 * Undo mlx4_en_activate_rx_rings() for one ring: destroy its SRQ first
 * (so HW stops consuming), then free posted buffers and tear down the
 * page allocator.
 */
void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
				struct mlx4_en_rx_ring *ring)
{
	struct mlx4_en_dev *mdev = priv->mdev;

	mlx4_srq_free(mdev->dev, &ring->srq);
	mlx4_en_free_rx_buf(priv, ring);
	mlx4_en_destroy_allocator(priv, ring);
}
517
518
/*
 * Unmap a completed descriptor and hand its fragments over to
 * @skb_frags_rx, immediately replacing each consumed fragment in the HW
 * descriptor so the slot can be reposted.
 *
 * Only fragments actually covered by @length are consumed; the size of
 * the last one is trimmed to the true packet length.
 *
 * Returns the number of fragments transferred, or 0 if a replacement
 * buffer could not be allocated (the packet is dropped; already
 * collected fragments are released, the rest stay in the ring).
 */
static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
				    struct mlx4_en_rx_desc *rx_desc,
				    struct skb_frag_struct *skb_frags,
				    struct skb_frag_struct *skb_frags_rx,
				    struct mlx4_en_rx_alloc *page_alloc,
				    int length)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_frag_info *frag_info;
	int nr;
	dma_addr_t dma;

	/* Collect used fragments while replacing them in the HW descirptors */
	for (nr = 0; nr < priv->num_frags; nr++) {
		frag_info = &priv->frag_info[nr];
		if (length <= frag_info->frag_prefix_size)
			break;

		/* Save page reference in skb */
		skb_frags_rx[nr].page = skb_frags[nr].page;
		skb_frags_rx[nr].size = skb_frags[nr].size;
		skb_frags_rx[nr].page_offset = skb_frags[nr].page_offset;
		/* Grab the DMA address before alloc_frag overwrites it */
		dma = be64_to_cpu(rx_desc->data[nr].addr);

		/* Allocate a replacement page */
		if (mlx4_en_alloc_frag(priv, rx_desc, skb_frags, page_alloc, nr))
			goto fail;

		/* Unmap buffer */
		pci_unmap_single(mdev->pdev, dma, skb_frags[nr].size,
				 PCI_DMA_FROMDEVICE);
	}
	/* Adjust size of last fragment to match actual length.
	 * (assumes length > 0, so nr >= 1 - fragment 0 has prefix size 0
	 * per mlx4_en_calc_rx_buf()) */
	skb_frags_rx[nr - 1].size = length -
		priv->frag_info[nr - 1].frag_prefix_size;
	return nr;

fail:
	/* Drop all accumulated fragments (which have already been replaced in
	 * the descriptor) of this packet; remaining fragments are reused... */
	while (nr > 0) {
		nr--;
		put_page(skb_frags_rx[nr].page);
	}
	return 0;
}
566
567
/*
 * Build an skb for a received packet.
 *
 * Packets up to SMALL_PACKET_SIZE are copied entirely into the linear
 * part of a small skb and the ring buffers stay posted.  Larger packets
 * get HEADER_COPY_SIZE bytes copied into the linear area and the rest
 * attached as page fragments pulled from the ring (which are replaced
 * in place via mlx4_en_complete_rx_desc()).
 *
 * Returns the skb or NULL on allocation failure.
 *
 * NOTE(review): if mlx4_en_complete_rx_desc() fails (used_frags == 0)
 * the skb is still returned with nr_frags == 0 but data_len set and the
 * header memcpy done from a page that was just released - verify this
 * path cannot produce a corrupt skb.
 */
static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
				      struct mlx4_en_rx_desc *rx_desc,
				      struct skb_frag_struct *skb_frags,
				      struct mlx4_en_rx_alloc *page_alloc,
				      unsigned int length)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct sk_buff *skb;
	void *va;
	int used_frags;
	dma_addr_t dma;

	skb = dev_alloc_skb(SMALL_PACKET_SIZE + NET_IP_ALIGN);
	if (!skb) {
		mlx4_dbg(RX_ERR, priv, "Failed allocating skb\n");
		return NULL;
	}
	skb->dev = priv->dev;
	skb_reserve(skb, NET_IP_ALIGN);
	skb->len = length;
	skb->truesize = length + sizeof(struct sk_buff);

	/* Get pointer to first fragment so we could copy the headers into the
	 * (linear part of the) skb */
	va = page_address(skb_frags[0].page) + skb_frags[0].page_offset;

	if (length <= SMALL_PACKET_SIZE) {
		/* We are copying all relevant data to the skb - temporarily
		 * synch buffers for the copy */
		dma = be64_to_cpu(rx_desc->data[0].addr);
		dma_sync_single_range_for_cpu(&mdev->pdev->dev, dma, 0,
					      length, DMA_FROM_DEVICE);
		skb_copy_to_linear_data(skb, va, length);
		dma_sync_single_range_for_device(&mdev->pdev->dev, dma, 0,
						 length, DMA_FROM_DEVICE);
		skb->tail += length;
	} else {

		/* Move relevant fragments to skb */
		used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, skb_frags,
						      skb_shinfo(skb)->frags,
						      page_alloc, length);
		skb_shinfo(skb)->nr_frags = used_frags;

		/* Copy headers into the skb linear buffer */
		memcpy(skb->data, va, HEADER_COPY_SIZE);
		skb->tail += HEADER_COPY_SIZE;

		/* Skip headers in first fragment */
		skb_shinfo(skb)->frags[0].page_offset += HEADER_COPY_SIZE;

		/* Adjust size of first fragment */
		skb_shinfo(skb)->frags[0].size -= HEADER_COPY_SIZE;
		skb->data_len = length - HEADER_COPY_SIZE;
	}
	return skb;
}
625
626static void mlx4_en_copy_desc(struct mlx4_en_priv *priv,
627 struct mlx4_en_rx_ring *ring,
628 int from, int to, int num)
629{
630 struct skb_frag_struct *skb_frags_from;
631 struct skb_frag_struct *skb_frags_to;
632 struct mlx4_en_rx_desc *rx_desc_from;
633 struct mlx4_en_rx_desc *rx_desc_to;
634 int from_index, to_index;
635 int nr, i;
636
637 for (i = 0; i < num; i++) {
638 from_index = (from + i) & ring->size_mask;
639 to_index = (to + i) & ring->size_mask;
640 skb_frags_from = ring->rx_info + (from_index << priv->log_rx_info);
641 skb_frags_to = ring->rx_info + (to_index << priv->log_rx_info);
642 rx_desc_from = ring->buf + (from_index << ring->log_stride);
643 rx_desc_to = ring->buf + (to_index << ring->log_stride);
644
645 for (nr = 0; nr < priv->num_frags; nr++) {
646 skb_frags_to[nr].page = skb_frags_from[nr].page;
647 skb_frags_to[nr].page_offset = skb_frags_from[nr].page_offset;
648 rx_desc_to->data[nr].addr = rx_desc_from->data[nr].addr;
649 }
650 }
651}
652
653
/*
 * Poll the RX completion queue and process up to @budget completions.
 *
 * Each good CQE corresponds 1:1 to a ring descriptor; the packet is
 * turned into an skb (via LRO when eligible) and pushed up the stack,
 * and the descriptor's buffers are replaced in place.  On exit the
 * ring indices and HW doorbells are updated.
 *
 * Returns the number of completions processed (<= budget).
 */
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_cqe *cqe;
	struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
	struct skb_frag_struct *skb_frags;
	struct skb_frag_struct lro_frags[MLX4_EN_MAX_RX_FRAGS];
	struct mlx4_en_rx_desc *rx_desc;
	struct sk_buff *skb;
	int index;
	int nr;
	unsigned int length;
	int polled = 0;
	int ip_summed;

	if (!priv->port_up)
		return 0;

	/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
	 * descriptor offset can be deduced from the CQE index instead of
	 * reading 'cqe->index' */
	index = cq->mcq.cons_index & ring->size_mask;
	cqe = &cq->buf[index];

	/* Process all completed CQEs: the ownership bit must match the
	 * wrap-around parity of the consumer index */
	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
		    cq->mcq.cons_index & cq->size)) {

		skb_frags = ring->rx_info + (index << priv->log_rx_info);
		rx_desc = ring->buf + (index << ring->log_stride);

		/*
		 * make sure we read the CQE after we read the ownership bit
		 */
		rmb();

		/* Drop packet on bad receive or bad checksum */
		if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
						MLX4_CQE_OPCODE_ERROR)) {
			mlx4_err(mdev, "CQE completed in error - vendor "
				  "syndrom:%d syndrom:%d\n",
				  ((struct mlx4_err_cqe *) cqe)->vendor_err_syndrome,
				  ((struct mlx4_err_cqe *) cqe)->syndrome);
			goto next;
		}
		if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
			mlx4_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
			goto next;
		}

		/*
		 * Packet is OK - process it.
		 */
		length = be32_to_cpu(cqe->byte_cnt);
		ring->bytes += length;
		ring->packets++;

		if (likely(priv->rx_csum)) {
			if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
			    (cqe->checksum == cpu_to_be16(0xffff))) {
				priv->port_stats.rx_chksum_good++;
				/* This packet is eligible for LRO if it is:
				 * - DIX Ethernet (type interpretation)
				 * - TCP/IP (v4)
				 * - without IP options
				 * - not an IP fragment */
				if (mlx4_en_can_lro(cqe->status) &&
				    dev->features & NETIF_F_LRO) {

					nr = mlx4_en_complete_rx_desc(
						priv, rx_desc,
						skb_frags, lro_frags,
						ring->page_alloc, length);
					if (!nr)
						goto next;

					if (priv->vlgrp && (cqe->vlan_my_qpn &
						cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK))) {
						lro_vlan_hwaccel_receive_frags(
						       &ring->lro, lro_frags,
						       length, length,
						       priv->vlgrp,
						       be16_to_cpu(cqe->sl_vid),
						       NULL, 0);
					} else
						lro_receive_frags(&ring->lro,
								  lro_frags,
								  length,
								  length,
								  NULL, 0);

					goto next;
				}

				/* LRO not possible, complete processing here */
				ip_summed = CHECKSUM_UNNECESSARY;
				INC_PERF_COUNTER(priv->pstats.lro_misses);
			} else {
				ip_summed = CHECKSUM_NONE;
				priv->port_stats.rx_chksum_none++;
			}
		} else {
			ip_summed = CHECKSUM_NONE;
			priv->port_stats.rx_chksum_none++;
		}

		skb = mlx4_en_rx_skb(priv, rx_desc, skb_frags,
				     ring->page_alloc, length);
		if (!skb) {
			priv->stats.rx_dropped++;
			goto next;
		}

		skb->ip_summed = ip_summed;
		skb->protocol = eth_type_trans(skb, dev);

		/* Push it up the stack */
		if (priv->vlgrp && (be32_to_cpu(cqe->vlan_my_qpn) &
				    MLX4_CQE_VLAN_PRESENT_MASK)) {
			vlan_hwaccel_receive_skb(skb, priv->vlgrp,
						be16_to_cpu(cqe->sl_vid));
		} else
			netif_receive_skb(skb);

		dev->last_rx = jiffies;

next:
		++cq->mcq.cons_index;
		index = (cq->mcq.cons_index) & ring->size_mask;
		cqe = &cq->buf[index];
		if (++polled == budget) {
			/* We are here because we reached the NAPI budget -
			 * flush only pending LRO sessions */
			lro_flush_all(&ring->lro);
			goto out;
		}
	}

	/* If CQ is empty flush all LRO sessions unconditionally */
	lro_flush_all(&ring->lro);

out:
	AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
	mlx4_cq_set_ci(&cq->mcq);
	wmb(); /* ensure HW sees CQ consumer before we post new buffers */
	ring->cons = cq->mcq.cons_index;
	ring->prod += polled; /* Polled descriptors were realocated in place */
	if (unlikely(!ring->full)) {
		mlx4_en_copy_desc(priv, ring, ring->cons - polled,
				  ring->prod - polled, polled);
		mlx4_en_fill_rx_buf(dev, ring);
	}
	mlx4_en_update_rx_prod_db(ring);
	return polled;
}
810
811
812void mlx4_en_rx_irq(struct mlx4_cq *mcq)
813{
814 struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
815 struct mlx4_en_priv *priv = netdev_priv(cq->dev);
816
817 if (priv->port_up)
818 netif_rx_schedule(cq->dev, &cq->napi);
819 else
820 mlx4_en_arm_cq(priv, cq);
821}
822
823/* Rx CQ polling - called by NAPI */
824int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
825{
826 struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
827 struct net_device *dev = cq->dev;
828 struct mlx4_en_priv *priv = netdev_priv(dev);
829 int done;
830
831 done = mlx4_en_process_rx_cq(dev, cq, budget);
832
833 /* If we used up all the quota - we're probably not done yet... */
834 if (done == budget)
835 INC_PERF_COUNTER(priv->pstats.napi_quota);
836 else {
837 /* Done for now */
838 netif_rx_complete(dev, napi);
839 mlx4_en_arm_cq(priv, cq);
840 }
841 return done;
842}
843
844
845/* Calculate the last offset position that accomodates a full fragment
846 * (assuming fagment size = stride-align) */
847static int mlx4_en_last_alloc_offset(struct mlx4_en_priv *priv, u16 stride, u16 align)
848{
849 u16 res = MLX4_EN_ALLOC_SIZE % stride;
850 u16 offset = MLX4_EN_ALLOC_SIZE - stride - res + align;
851
852 mlx4_dbg(DRV, priv, "Calculated last offset for stride:%d align:%d "
853 "res:%d offset:%d\n", stride, align, res, offset);
854 return offset;
855}
856
857
/* Candidate fragment sizes for the RX scatter list; the effective MTU
 * is spread over up to MLX4_EN_MAX_RX_FRAGS buffers of these sizes. */
static int frag_sizes[] = {
	FRAG_SZ0,
	FRAG_SZ1,
	FRAG_SZ2,
	FRAG_SZ3
};
864
/*
 * Compute the RX fragment layout for the device's current MTU: how many
 * fragments are needed, the size/alignment/stride of each, and the
 * last allocator offset per fragment.  Results are stored in
 * priv->frag_info[], priv->num_frags, priv->rx_skb_size and
 * priv->log_rx_info.
 */
void mlx4_en_calc_rx_buf(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	/* Worst-case on-wire size: MTU + Ethernet + VLAN + LLC/SNAP */
	int eff_mtu = dev->mtu + ETH_HLEN + VLAN_HLEN + ETH_LLC_SNAP_SIZE;
	int buf_size = 0;
	int i = 0;

	while (buf_size < eff_mtu) {
		/* Last fragment is trimmed to the remaining length */
		priv->frag_info[i].frag_size =
			(eff_mtu > buf_size + frag_sizes[i]) ?
				frag_sizes[i] : eff_mtu - buf_size;
		priv->frag_info[i].frag_prefix_size = buf_size;
		if (!i)	{
			/* First fragment carries the headers: IP-align it */
			priv->frag_info[i].frag_align = NET_IP_ALIGN;
			priv->frag_info[i].frag_stride =
				ALIGN(frag_sizes[i] + NET_IP_ALIGN, SMP_CACHE_BYTES);
		} else {
			priv->frag_info[i].frag_align = 0;
			priv->frag_info[i].frag_stride =
				ALIGN(frag_sizes[i], SMP_CACHE_BYTES);
		}
		priv->frag_info[i].last_offset = mlx4_en_last_alloc_offset(
						priv, priv->frag_info[i].frag_stride,
						priv->frag_info[i].frag_align);
		buf_size += priv->frag_info[i].frag_size;
		i++;
	}

	priv->num_frags = i;
	priv->rx_skb_size = eff_mtu;
	/* Shift used to index the per-descriptor skb_frag array */
	priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct skb_frag_struct));

	mlx4_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d "
		  "num_frags:%d):\n", eff_mtu, priv->num_frags);
	for (i = 0; i < priv->num_frags; i++) {
		mlx4_dbg(DRV, priv, "  frag:%d - size:%d prefix:%d align:%d "
				"stride:%d last_offset:%d\n", i,
				priv->frag_info[i].frag_size,
				priv->frag_info[i].frag_prefix_size,
				priv->frag_info[i].frag_align,
				priv->frag_info[i].frag_stride,
				priv->frag_info[i].last_offset);
	}
}
909
910/* RSS related functions */
911
912/* Calculate rss size and map each entry in rss table to rx ring */
913void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv,
914 struct mlx4_en_rss_map *rss_map,
915 int num_entries, int num_rings)
916{
917 int i;
918
919 rss_map->size = roundup_pow_of_two(num_entries);
920 mlx4_dbg(DRV, priv, "Setting default RSS map of %d entires\n",
921 rss_map->size);
922
923 for (i = 0; i < rss_map->size; i++) {
924 rss_map->map[i] = i % num_rings;
925 mlx4_dbg(DRV, priv, "Entry %d ---> ring %d\n", i, rss_map->map[i]);
926 }
927}
928
929static void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event)
930{
931 return;
932}
933
934
935static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv,
936 int qpn, int srqn, int cqn,
937 enum mlx4_qp_state *state,
938 struct mlx4_qp *qp)
939{
940 struct mlx4_en_dev *mdev = priv->mdev;
941 struct mlx4_qp_context *context;
942 int err = 0;
943
944 context = kmalloc(sizeof *context , GFP_KERNEL);
945 if (!context) {
946 mlx4_err(mdev, "Failed to allocate qp context\n");
947 return -ENOMEM;
948 }
949
950 err = mlx4_qp_alloc(mdev->dev, qpn, qp);
951 if (err) {
952 mlx4_err(mdev, "Failed to allocate qp #%d\n", qpn);
953 goto out;
954 return err;
955 }
956 qp->event = mlx4_en_sqp_event;
957
958 memset(context, 0, sizeof *context);
959 mlx4_en_fill_qp_context(priv, 0, 0, 0, 0, qpn, cqn, srqn, context);
960
961 err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, context, qp, state);
962 if (err) {
963 mlx4_qp_remove(mdev->dev, qp);
964 mlx4_qp_free(mdev->dev, qp);
965 }
966out:
967 kfree(context);
968 return err;
969}
970
/*
 * Allocate and configure the RSS steering machinery: one RX QP per RSS
 * table entry (each bound to a ring's CQ/SRQ per rss_map->map) plus the
 * RSS indirection QP that spreads incoming flows over them.
 *
 * Returns 0 on success or a negative errno; all QPs created so far are
 * torn down on failure.
 */
int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
	struct mlx4_qp_context context;
	struct mlx4_en_rss_context *rss_context;
	void *ptr;
	int rss_xor = mdev->profile.rss_xor;
	u8 rss_mask = mdev->profile.rss_mask;
	int i, srqn, qpn, cqn;
	int err = 0;
	int good_qps = 0;

	mlx4_dbg(DRV, priv, "Configuring rss steering for port %u\n", priv->port);
	err = mlx4_qp_reserve_range(mdev->dev, rss_map->size,
				    rss_map->size, &rss_map->base_qpn);
	if (err) {
		mlx4_err(mdev, "Failed reserving %d qps for port %u\n",
			 rss_map->size, priv->port);
		return err;
	}

	for (i = 0; i < rss_map->size; i++) {
		cqn = priv->rx_ring[rss_map->map[i]].cqn;
		srqn = priv->rx_ring[rss_map->map[i]].srq.srqn;
		qpn = rss_map->base_qpn + i;
		err = mlx4_en_config_rss_qp(priv, qpn, srqn, cqn,
					    &rss_map->state[i],
					    &rss_map->qps[i]);
		if (err)
			goto rss_err;

		++good_qps;
	}

	/* Configure RSS indirection qp */
	err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &priv->base_qpn);
	if (err) {
		mlx4_err(mdev, "Failed to reserve range for RSS "
			       "indirection qp\n");
		goto rss_err;
	}
	err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp);
	if (err) {
		mlx4_err(mdev, "Failed to allocate RSS indirection QP\n");
		goto reserve_err;
	}
	rss_map->indir_qp.event = mlx4_en_sqp_event;
	mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
				priv->rx_ring[0].cqn, 0, &context);

	/* The RSS fields occupy a reserved area of the generic QP
	 * context.  NOTE(review): the 0x3c offset is presumably the PRM
	 * location of the RSS context - confirm before modifying. */
	ptr = ((void *) &context) + 0x3c;
	rss_context = (struct mlx4_en_rss_context *) ptr;
	rss_context->base_qpn = cpu_to_be32(ilog2(rss_map->size) << 24 |
					    (rss_map->base_qpn));
	rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
	rss_context->hash_fn = rss_xor & 0x3;
	rss_context->flags = rss_mask << 2;

	err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context,
			       &rss_map->indir_qp, &rss_map->indir_state);
	if (err)
		goto indir_err;

	return 0;

indir_err:
	mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
		       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
	mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
	mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
reserve_err:
	mlx4_qp_release_range(mdev->dev, priv->base_qpn, 1);
rss_err:
	for (i = 0; i < good_qps; i++) {
		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
			       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
		mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
		mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
	}
	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, rss_map->size);
	return err;
}
1055
/*
 * Tear down everything built by mlx4_en_config_rss_steer(): reset and
 * free the indirection QP, then every per-entry RSS QP, releasing both
 * reserved QP number ranges.
 */
void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
	int i;

	mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
		       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
	mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
	mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
	mlx4_qp_release_range(mdev->dev, priv->base_qpn, 1);

	for (i = 0; i < rss_map->size; i++) {
		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
			       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
		mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
		mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
	}
	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, rss_map->size);
}
1076
1077
1078
1079
1080
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
new file mode 100644
index 000000000000..8592f8fb8475
--- /dev/null
+++ b/drivers/net/mlx4/en_tx.c
@@ -0,0 +1,820 @@
1/*
2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#include <asm/page.h>
35#include <linux/mlx4/cq.h>
36#include <linux/mlx4/qp.h>
37#include <linux/skbuff.h>
38#include <linux/if_vlan.h>
39#include <linux/vmalloc.h>
40
41#include "mlx4_en.h"
42
43enum {
44 MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */
45};
46
47static int inline_thold __read_mostly = MAX_INLINE;
48
49module_param_named(inline_thold, inline_thold, int, 0444);
50MODULE_PARM_DESC(inline_thold, "treshold for using inline data");
51
/*
 * Allocate all resources for one TX ring: the per-descriptor bookkeeping
 * array (tx_info), a bounce buffer for descriptors that wrap past the
 * ring end, the HW work-queue memory, and a QP to post on.
 * @size must be a power of two (size_mask is computed as size - 1).
 * Returns 0 on success or a negative errno; on failure everything
 * already acquired is unwound via the goto chain below.
 */
int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
			   struct mlx4_en_tx_ring *ring, u32 size,
			   u16 stride)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	int tmp;
	int err;

	ring->size = size;
	ring->size_mask = size - 1;
	ring->stride = stride;

	/* Clamp the module parameter to the largest legal inline size */
	inline_thold = min(inline_thold, MAX_INLINE);

	spin_lock_init(&ring->comp_lock);

	tmp = size * sizeof(struct mlx4_en_tx_info);
	ring->tx_info = vmalloc(tmp);
	if (!ring->tx_info) {
		mlx4_err(mdev, "Failed allocating tx_info ring\n");
		return -ENOMEM;
	}
	mlx4_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
		 ring->tx_info, tmp);

	/* Staging area for descriptors that would wrap past the ring end */
	ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
	if (!ring->bounce_buf) {
		mlx4_err(mdev, "Failed allocating bounce buffer\n");
		err = -ENOMEM;
		goto err_tx;
	}
	ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE);

	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
				 2 * PAGE_SIZE);
	if (err) {
		mlx4_err(mdev, "Failed allocating hwq resources\n");
		goto err_bounce;
	}

	err = mlx4_en_map_buffer(&ring->wqres.buf);
	if (err) {
		mlx4_err(mdev, "Failed to map TX buffer\n");
		goto err_hwq_res;
	}

	ring->buf = ring->wqres.buf.direct.buf;

	mlx4_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d "
		 "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size,
		 ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map);

	err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn);
	if (err) {
		mlx4_err(mdev, "Failed reserving qp for tx ring.\n");
		goto err_map;
	}

	err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp);
	if (err) {
		mlx4_err(mdev, "Failed allocating qp %d\n", ring->qpn);
		goto err_reserve;
	}

	return 0;

	/* Error unwind: release in reverse order of acquisition */
err_reserve:
	mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
err_map:
	mlx4_en_unmap_buffer(&ring->wqres.buf);
err_hwq_res:
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
err_bounce:
	kfree(ring->bounce_buf);
	ring->bounce_buf = NULL;
err_tx:
	vfree(ring->tx_info);
	ring->tx_info = NULL;
	return err;
}
132
/*
 * Release everything acquired by mlx4_en_create_tx_ring(), in reverse
 * order of acquisition: QP, reserved QP number, mapped buffer, HW queue
 * memory, bounce buffer and bookkeeping array.  bounce_buf and tx_info
 * are NULLed after freeing so a stale pointer cannot be double-freed.
 */
void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_tx_ring *ring)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	mlx4_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);

	mlx4_qp_remove(mdev->dev, &ring->qp);
	mlx4_qp_free(mdev->dev, &ring->qp);
	mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
	mlx4_en_unmap_buffer(&ring->wqres.buf);
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
	kfree(ring->bounce_buf);
	ring->bounce_buf = NULL;
	vfree(ring->tx_info);
	ring->tx_info = NULL;
}
149
150int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
151 struct mlx4_en_tx_ring *ring,
152 int cq, int srqn)
153{
154 struct mlx4_en_dev *mdev = priv->mdev;
155 int err;
156
157 ring->cqn = cq;
158 ring->prod = 0;
159 ring->cons = 0xffffffff;
160 ring->last_nr_txbb = 1;
161 ring->poll_cnt = 0;
162 ring->blocked = 0;
163 memset(ring->tx_info, 0, ring->size * sizeof(struct mlx4_en_tx_info));
164 memset(ring->buf, 0, ring->buf_size);
165
166 ring->qp_state = MLX4_QP_STATE_RST;
167 ring->doorbell_qpn = swab32(ring->qp.qpn << 8);
168
169 mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
170 ring->cqn, srqn, &ring->context);
171
172 err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
173 &ring->qp, &ring->qp_state);
174
175 return err;
176}
177
178void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
179 struct mlx4_en_tx_ring *ring)
180{
181 struct mlx4_en_dev *mdev = priv->mdev;
182
183 mlx4_qp_modify(mdev->dev, NULL, ring->qp_state,
184 MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp);
185}
186
187
188static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
189 struct mlx4_en_tx_ring *ring,
190 int index, u8 owner)
191{
192 struct mlx4_en_dev *mdev = priv->mdev;
193 struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
194 struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE;
195 struct mlx4_wqe_data_seg *data = (void *) tx_desc + tx_info->data_offset;
196 struct sk_buff *skb = tx_info->skb;
197 struct skb_frag_struct *frag;
198 void *end = ring->buf + ring->buf_size;
199 int frags = skb_shinfo(skb)->nr_frags;
200 int i;
201 __be32 *ptr = (__be32 *)tx_desc;
202 __be32 stamp = cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT));
203
204 /* Optimize the common case when there are no wraparounds */
205 if (likely((void *) tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) {
206 if (tx_info->linear) {
207 pci_unmap_single(mdev->pdev,
208 (dma_addr_t) be64_to_cpu(data->addr),
209 be32_to_cpu(data->byte_count),
210 PCI_DMA_TODEVICE);
211 ++data;
212 }
213
214 for (i = 0; i < frags; i++) {
215 frag = &skb_shinfo(skb)->frags[i];
216 pci_unmap_page(mdev->pdev,
217 (dma_addr_t) be64_to_cpu(data[i].addr),
218 frag->size, PCI_DMA_TODEVICE);
219 }
220 /* Stamp the freed descriptor */
221 for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
222 *ptr = stamp;
223 ptr += STAMP_DWORDS;
224 }
225
226 } else {
227 if ((void *) data >= end) {
228 data = (struct mlx4_wqe_data_seg *)
229 (ring->buf + ((void *) data - end));
230 }
231
232 if (tx_info->linear) {
233 pci_unmap_single(mdev->pdev,
234 (dma_addr_t) be64_to_cpu(data->addr),
235 be32_to_cpu(data->byte_count),
236 PCI_DMA_TODEVICE);
237 ++data;
238 }
239
240 for (i = 0; i < frags; i++) {
241 /* Check for wraparound before unmapping */
242 if ((void *) data >= end)
243 data = (struct mlx4_wqe_data_seg *) ring->buf;
244 frag = &skb_shinfo(skb)->frags[i];
245 pci_unmap_page(mdev->pdev,
246 (dma_addr_t) be64_to_cpu(data->addr),
247 frag->size, PCI_DMA_TODEVICE);
248 }
249 /* Stamp the freed descriptor */
250 for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
251 *ptr = stamp;
252 ptr += STAMP_DWORDS;
253 if ((void *) ptr >= end) {
254 ptr = ring->buf;
255 stamp ^= cpu_to_be32(0x80000000);
256 }
257 }
258
259 }
260 dev_kfree_skb_any(skb);
261 return tx_info->nr_txbb;
262}
263
264
265int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
266{
267 struct mlx4_en_priv *priv = netdev_priv(dev);
268 int cnt = 0;
269
270 /* Skip last polled descriptor */
271 ring->cons += ring->last_nr_txbb;
272 mlx4_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n",
273 ring->cons, ring->prod);
274
275 if ((u32) (ring->prod - ring->cons) > ring->size) {
276 if (netif_msg_tx_err(priv))
277 mlx4_warn(priv->mdev, "Tx consumer passed producer!\n");
278 return 0;
279 }
280
281 while (ring->cons != ring->prod) {
282 ring->last_nr_txbb = mlx4_en_free_tx_desc(priv, ring,
283 ring->cons & ring->size_mask,
284 !!(ring->cons & ring->size));
285 ring->cons += ring->last_nr_txbb;
286 cnt++;
287 }
288
289 if (cnt)
290 mlx4_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt);
291
292 return cnt;
293}
294
295void mlx4_en_set_prio_map(struct mlx4_en_priv *priv, u16 *prio_map, u32 ring_num)
296{
297 int block = 8 / ring_num;
298 int extra = 8 - (block * ring_num);
299 int num = 0;
300 u16 ring = 1;
301 int prio;
302
303 if (ring_num == 1) {
304 for (prio = 0; prio < 8; prio++)
305 prio_map[prio] = 0;
306 return;
307 }
308
309 for (prio = 0; prio < 8; prio++) {
310 if (extra && (num == block + 1)) {
311 ring++;
312 num = 0;
313 extra--;
314 } else if (!extra && (num == block)) {
315 ring++;
316 num = 0;
317 }
318 prio_map[prio] = ring;
319 mlx4_dbg(DRV, priv, " prio:%d --> ring:%d\n", prio, ring);
320 num++;
321 }
322}
323
/*
 * Reap TX completions for @cq's ring: free every descriptor from the
 * ring consumer up to the wqe_index reported by the CQE, advance the CQ
 * and ring consumer indexes, and re-open the netdev queue if this ring
 * had stopped it.
 *
 * NOTE(review): wqe_index is read from the first CQE in cq->buf rather
 * than from a rotating CQE ring -- presumably the TX CQ is configured so
 * HW collapses completions into one in-place-updated entry; confirm
 * against the CQ setup code.
 * NOTE(review): mlx4_en_tx_irq()/mlx4_en_poll_tx_cq() wrap this call in
 * ring->comp_lock, but mlx4_en_xmit_poll() calls it without the lock --
 * verify the intended locking contract.
 */
static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_cq *mcq = &cq->mcq;
	struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
	struct mlx4_cqe *cqe = cq->buf;
	u16 index;
	u16 new_index;
	u32 txbbs_skipped = 0;	/* total TXBBs consumed this invocation */
	u32 cq_last_sav;	/* CQ consumer index on entry, for stats */

	/* index always points to the first TXBB of the last polled descriptor */
	index = ring->cons & ring->size_mask;
	new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask;
	if (index == new_index)
		return;		/* nothing new completed */

	if (!priv->port_up)
		return;

	/*
	 * We use a two-stage loop:
	 * - the first samples the HW-updated CQE
	 * - the second frees TXBBs until the last sample
	 * This lets us amortize CQE cache misses, while still polling the CQ
	 * until is quiescent.
	 */
	cq_last_sav = mcq->cons_index;
	do {
		do {
			/* Skip over last polled CQE */
			index = (index + ring->last_nr_txbb) & ring->size_mask;
			txbbs_skipped += ring->last_nr_txbb;

			/* Poll next CQE */
			ring->last_nr_txbb = mlx4_en_free_tx_desc(
						priv, ring, index,
						!!((ring->cons + txbbs_skipped) &
						   ring->size));
			++mcq->cons_index;

		} while (index != new_index);

		/* Re-sample: more completions may have arrived meanwhile */
		new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask;
	} while (index != new_index);
	AVG_PERF_COUNTER(priv->pstats.tx_coal_avg,
			 (u32) (mcq->cons_index - cq_last_sav));

	/*
	 * To prevent CQ overflow we first update CQ consumer and only then
	 * the ring consumer.
	 */
	mlx4_cq_set_ci(mcq);
	wmb();
	ring->cons += txbbs_skipped;

	/* Wakeup Tx queue if this ring stopped it */
	if (unlikely(ring->blocked)) {
		if (((u32) (ring->prod - ring->cons) <=
		     ring->size - HEADROOM - MAX_DESC_TXBBS) && !cq->armed) {

			/* TODO: support multiqueue netdevs. Currently, we block
			 * when *any* ring is full. Note that:
			 * - 2 Tx rings can unblock at the same time and call
			 *   netif_wake_queue(), which is OK since this
			 *   operation is idempotent.
			 * - We might wake the queue just after another ring
			 *   stopped it. This is no big deal because the next
			 *   transmission on that ring would stop the queue.
			 */
			ring->blocked = 0;
			netif_wake_queue(dev);
			priv->port_stats.wake_queue++;
		}
	}
}
400
401void mlx4_en_tx_irq(struct mlx4_cq *mcq)
402{
403 struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
404 struct mlx4_en_priv *priv = netdev_priv(cq->dev);
405 struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
406
407 spin_lock_irq(&ring->comp_lock);
408 cq->armed = 0;
409 mlx4_en_process_tx_cq(cq->dev, cq);
410 if (ring->blocked)
411 mlx4_en_arm_cq(priv, cq);
412 else
413 mod_timer(&cq->timer, jiffies + 1);
414 spin_unlock_irq(&ring->comp_lock);
415}
416
417
418void mlx4_en_poll_tx_cq(unsigned long data)
419{
420 struct mlx4_en_cq *cq = (struct mlx4_en_cq *) data;
421 struct mlx4_en_priv *priv = netdev_priv(cq->dev);
422 struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
423 u32 inflight;
424
425 INC_PERF_COUNTER(priv->pstats.tx_poll);
426
427 netif_tx_lock(priv->dev);
428 spin_lock_irq(&ring->comp_lock);
429 mlx4_en_process_tx_cq(cq->dev, cq);
430 inflight = (u32) (ring->prod - ring->cons - ring->last_nr_txbb);
431
432 /* If there are still packets in flight and the timer has not already
433 * been scheduled by the Tx routine then schedule it here to guarantee
434 * completion processing of these packets */
435 if (inflight && priv->port_up)
436 mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
437
438 spin_unlock_irq(&ring->comp_lock);
439 netif_tx_unlock(priv->dev);
440}
441
/*
 * Copy a descriptor that was staged in the bounce buffer into its real,
 * wrapping position in the ring: the portion past the ring end goes to
 * the start of the ring buffer, the leading portion to index*TXBB_SIZE.
 * Both loops copy backwards in 4-byte words with a write barrier at each
 * TXBB boundary.
 * NOTE(review): the second loop stops at i >= 4, leaving the very first
 * dword of the descriptor untouched -- presumably the caller writes the
 * opcode/ownership word itself afterwards (mlx4_en_xmit() sets
 * ctrl.owner_opcode after this call); confirm.
 * Returns the real descriptor location.
 */
static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
						      struct mlx4_en_tx_ring *ring,
						      u32 index,
						      unsigned int desc_size)
{
	/* Bytes of the descriptor that fit before the ring end */
	u32 copy = (ring->size - index) * TXBB_SIZE;
	int i;

	/* Wrapped tail: lands at the beginning of the ring buffer */
	for (i = desc_size - copy - 4; i >= 0; i -= 4) {
		if ((i & (TXBB_SIZE - 1)) == 0)
			wmb();

		*((u32 *) (ring->buf + i)) =
			*((u32 *) (ring->bounce_buf + copy + i));
	}

	/* Leading part: lands at the original index (first dword skipped) */
	for (i = copy - 4; i >= 4 ; i -= 4) {
		if ((i & (TXBB_SIZE - 1)) == 0)
			wmb();

		*((u32 *) (ring->buf + index * TXBB_SIZE + i)) =
			*((u32 *) (ring->bounce_buf + i));
	}

	/* Return real descriptor location */
	return ring->buf + index * TXBB_SIZE;
}
469
470static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
471{
472 struct mlx4_en_cq *cq = &priv->tx_cq[tx_ind];
473 struct mlx4_en_tx_ring *ring = &priv->tx_ring[tx_ind];
474
475 /* If we don't have a pending timer, set one up to catch our recent
476 post in case the interface becomes idle */
477 if (!timer_pending(&cq->timer))
478 mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
479
480 /* Poll the CQ every mlx4_en_TX_MODER_POLL packets */
481 if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0)
482 mlx4_en_process_tx_cq(priv->dev, cq);
483}
484
485static void *get_frag_ptr(struct sk_buff *skb)
486{
487 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
488 struct page *page = frag->page;
489 void *ptr;
490
491 ptr = page_address(page);
492 if (unlikely(!ptr))
493 return NULL;
494
495 return ptr + frag->page_offset;
496}
497
498static int is_inline(struct sk_buff *skb, void **pfrag)
499{
500 void *ptr;
501
502 if (inline_thold && !skb_is_gso(skb) && skb->len <= inline_thold) {
503 if (skb_shinfo(skb)->nr_frags == 1) {
504 ptr = get_frag_ptr(skb);
505 if (unlikely(!ptr))
506 return 0;
507
508 if (pfrag)
509 *pfrag = ptr;
510
511 return 1;
512 } else if (unlikely(skb_shinfo(skb)->nr_frags))
513 return 0;
514 else
515 return 1;
516 }
517
518 return 0;
519}
520
521static int inline_size(struct sk_buff *skb)
522{
523 if (skb->len + CTRL_SIZE + sizeof(struct mlx4_wqe_inline_seg)
524 <= MLX4_INLINE_ALIGN)
525 return ALIGN(skb->len + CTRL_SIZE +
526 sizeof(struct mlx4_wqe_inline_seg), 16);
527 else
528 return ALIGN(skb->len + CTRL_SIZE + 2 *
529 sizeof(struct mlx4_wqe_inline_seg), 16);
530}
531
532static int get_real_size(struct sk_buff *skb, struct net_device *dev,
533 int *lso_header_size)
534{
535 struct mlx4_en_priv *priv = netdev_priv(dev);
536 struct mlx4_en_dev *mdev = priv->mdev;
537 int real_size;
538
539 if (skb_is_gso(skb)) {
540 *lso_header_size = skb_transport_offset(skb) + tcp_hdrlen(skb);
541 real_size = CTRL_SIZE + skb_shinfo(skb)->nr_frags * DS_SIZE +
542 ALIGN(*lso_header_size + 4, DS_SIZE);
543 if (unlikely(*lso_header_size != skb_headlen(skb))) {
544 /* We add a segment for the skb linear buffer only if
545 * it contains data */
546 if (*lso_header_size < skb_headlen(skb))
547 real_size += DS_SIZE;
548 else {
549 if (netif_msg_tx_err(priv))
550 mlx4_warn(mdev, "Non-linear headers\n");
551 dev_kfree_skb_any(skb);
552 return 0;
553 }
554 }
555 if (unlikely(*lso_header_size > MAX_LSO_HDR_SIZE)) {
556 if (netif_msg_tx_err(priv))
557 mlx4_warn(mdev, "LSO header size too big\n");
558 dev_kfree_skb_any(skb);
559 return 0;
560 }
561 } else {
562 *lso_header_size = 0;
563 if (!is_inline(skb, NULL))
564 real_size = CTRL_SIZE + (skb_shinfo(skb)->nr_frags + 1) * DS_SIZE;
565 else
566 real_size = inline_size(skb);
567 }
568
569 return real_size;
570}
571
/*
 * Copy a small packet's payload directly into the descriptor's inline
 * segment(s) instead of DMA-mapping it.  If the packet fits in the space
 * left within the first MLX4_INLINE_ALIGN chunk it becomes one inline
 * segment; otherwise it is split in two, and the second segment's
 * byte_count is written only after a write barrier so the payload is
 * fully visible before its length.  The 1 << 31 bit set in byte_count
 * presumably flags the segment as inline data (mlx4 WQE format) --
 * confirm against the device programming manual.
 * @fragptr points at the single page fragment's data when one exists.
 * Also fills the ctrl-segment vlan/fence fields for the inline case.
 */
static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *skb,
			     int real_size, u16 *vlan_tag, int tx_ind, void *fragptr)
{
	struct mlx4_wqe_inline_seg *inl = &tx_desc->inl;
	/* Bytes available for data in the first inline chunk */
	int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - sizeof *inl;

	if (skb->len <= spc) {
		/* Whole packet fits into a single inline segment */
		inl->byte_count = cpu_to_be32(1 << 31 | skb->len);
		skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb));
		if (skb_shinfo(skb)->nr_frags)
			memcpy(((void *)(inl + 1)) + skb_headlen(skb), fragptr,
			       skb_shinfo(skb)->frags[0].size);

	} else {
		/* Split: first segment takes spc bytes, remainder goes into
		 * a second inline segment */
		inl->byte_count = cpu_to_be32(1 << 31 | spc);
		if (skb_headlen(skb) <= spc) {
			/* Linear data fits in the first segment; top it up
			 * with fragment data if needed */
			skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb));
			if (skb_headlen(skb) < spc) {
				memcpy(((void *)(inl + 1)) + skb_headlen(skb),
					fragptr, spc - skb_headlen(skb));
				fragptr += spc - skb_headlen(skb);
			}
			inl = (void *) (inl + 1) + spc;
			memcpy(((void *)(inl + 1)), fragptr, skb->len - spc);
		} else {
			/* Linear data itself spans both segments */
			skb_copy_from_linear_data(skb, inl + 1, spc);
			inl = (void *) (inl + 1) + spc;
			skb_copy_from_linear_data_offset(skb, spc, inl + 1,
					skb_headlen(skb) - spc);
			if (skb_shinfo(skb)->nr_frags)
				memcpy(((void *)(inl + 1)) + skb_headlen(skb) - spc,
					fragptr, skb_shinfo(skb)->frags[0].size);
		}

		/* Payload must be visible before the second segment's length */
		wmb();
		inl->byte_count = cpu_to_be32(1 << 31 | (skb->len - spc));
	}
	tx_desc->ctrl.vlan_tag = cpu_to_be16(*vlan_tag);
	tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!(*vlan_tag);
	tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
}
613
614static int get_vlan_info(struct mlx4_en_priv *priv, struct sk_buff *skb,
615 u16 *vlan_tag)
616{
617 int tx_ind;
618
619 /* Obtain VLAN information if present */
620 if (priv->vlgrp && vlan_tx_tag_present(skb)) {
621 *vlan_tag = vlan_tx_tag_get(skb);
622 /* Set the Tx ring to use according to vlan priority */
623 tx_ind = priv->tx_prio_map[*vlan_tag >> 13];
624 } else {
625 *vlan_tag = 0;
626 tx_ind = 0;
627 }
628 return tx_ind;
629}
630
631int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
632{
633 struct mlx4_en_priv *priv = netdev_priv(dev);
634 struct mlx4_en_dev *mdev = priv->mdev;
635 struct mlx4_en_tx_ring *ring;
636 struct mlx4_en_cq *cq;
637 struct mlx4_en_tx_desc *tx_desc;
638 struct mlx4_wqe_data_seg *data;
639 struct skb_frag_struct *frag;
640 struct mlx4_en_tx_info *tx_info;
641 int tx_ind = 0;
642 int nr_txbb;
643 int desc_size;
644 int real_size;
645 dma_addr_t dma;
646 u32 index;
647 __be32 op_own;
648 u16 vlan_tag;
649 int i;
650 int lso_header_size;
651 void *fragptr;
652
653 if (unlikely(!skb->len)) {
654 dev_kfree_skb_any(skb);
655 return NETDEV_TX_OK;
656 }
657 real_size = get_real_size(skb, dev, &lso_header_size);
658 if (unlikely(!real_size))
659 return NETDEV_TX_OK;
660
661 /* Allign descriptor to TXBB size */
662 desc_size = ALIGN(real_size, TXBB_SIZE);
663 nr_txbb = desc_size / TXBB_SIZE;
664 if (unlikely(nr_txbb > MAX_DESC_TXBBS)) {
665 if (netif_msg_tx_err(priv))
666 mlx4_warn(mdev, "Oversized header or SG list\n");
667 dev_kfree_skb_any(skb);
668 return NETDEV_TX_OK;
669 }
670
671 tx_ind = get_vlan_info(priv, skb, &vlan_tag);
672 ring = &priv->tx_ring[tx_ind];
673
674 /* Check available TXBBs And 2K spare for prefetch */
675 if (unlikely(((int)(ring->prod - ring->cons)) >
676 ring->size - HEADROOM - MAX_DESC_TXBBS)) {
677 /* every full Tx ring stops queue.
678 * TODO: implement multi-queue support (per-queue stop) */
679 netif_stop_queue(dev);
680 ring->blocked = 1;
681 priv->port_stats.queue_stopped++;
682
683 /* Use interrupts to find out when queue opened */
684 cq = &priv->tx_cq[tx_ind];
685 mlx4_en_arm_cq(priv, cq);
686 return NETDEV_TX_BUSY;
687 }
688
689 /* Now that we know what Tx ring to use */
690 if (unlikely(!priv->port_up)) {
691 if (netif_msg_tx_err(priv))
692 mlx4_warn(mdev, "xmit: port down!\n");
693 dev_kfree_skb_any(skb);
694 return NETDEV_TX_OK;
695 }
696
697 /* Track current inflight packets for performance analysis */
698 AVG_PERF_COUNTER(priv->pstats.inflight_avg,
699 (u32) (ring->prod - ring->cons - 1));
700
701 /* Packet is good - grab an index and transmit it */
702 index = ring->prod & ring->size_mask;
703
704 /* See if we have enough space for whole descriptor TXBB for setting
705 * SW ownership on next descriptor; if not, use a bounce buffer. */
706 if (likely(index + nr_txbb <= ring->size))
707 tx_desc = ring->buf + index * TXBB_SIZE;
708 else
709 tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf;
710
711 /* Save skb in tx_info ring */
712 tx_info = &ring->tx_info[index];
713 tx_info->skb = skb;
714 tx_info->nr_txbb = nr_txbb;
715
716 /* Prepare ctrl segement apart opcode+ownership, which depends on
717 * whether LSO is used */
718 tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag);
719 tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!vlan_tag;
720 tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
721 tx_desc->ctrl.srcrb_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
722 MLX4_WQE_CTRL_SOLICITED);
723 if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
724 tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
725 MLX4_WQE_CTRL_TCP_UDP_CSUM);
726 priv->port_stats.tx_chksum_offload++;
727 }
728
729 /* Handle LSO (TSO) packets */
730 if (lso_header_size) {
731 /* Mark opcode as LSO */
732 op_own = cpu_to_be32(MLX4_OPCODE_LSO | (1 << 6)) |
733 ((ring->prod & ring->size) ?
734 cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
735
736 /* Fill in the LSO prefix */
737 tx_desc->lso.mss_hdr_size = cpu_to_be32(
738 skb_shinfo(skb)->gso_size << 16 | lso_header_size);
739
740 /* Copy headers;
741 * note that we already verified that it is linear */
742 memcpy(tx_desc->lso.header, skb->data, lso_header_size);
743 data = ((void *) &tx_desc->lso +
744 ALIGN(lso_header_size + 4, DS_SIZE));
745
746 priv->port_stats.tso_packets++;
747 i = ((skb->len - lso_header_size) / skb_shinfo(skb)->gso_size) +
748 !!((skb->len - lso_header_size) % skb_shinfo(skb)->gso_size);
749 ring->bytes += skb->len + (i - 1) * lso_header_size;
750 ring->packets += i;
751 } else {
752 /* Normal (Non LSO) packet */
753 op_own = cpu_to_be32(MLX4_OPCODE_SEND) |
754 ((ring->prod & ring->size) ?
755 cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
756 data = &tx_desc->data;
757 ring->bytes += max(skb->len, (unsigned int) ETH_ZLEN);
758 ring->packets++;
759
760 }
761 AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len);
762
763
764 /* valid only for none inline segments */
765 tx_info->data_offset = (void *) data - (void *) tx_desc;
766
767 tx_info->linear = (lso_header_size < skb_headlen(skb) && !is_inline(skb, NULL)) ? 1 : 0;
768 data += skb_shinfo(skb)->nr_frags + tx_info->linear - 1;
769
770 if (!is_inline(skb, &fragptr)) {
771 /* Map fragments */
772 for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) {
773 frag = &skb_shinfo(skb)->frags[i];
774 dma = pci_map_page(mdev->dev->pdev, frag->page, frag->page_offset,
775 frag->size, PCI_DMA_TODEVICE);
776 data->addr = cpu_to_be64(dma);
777 data->lkey = cpu_to_be32(mdev->mr.key);
778 wmb();
779 data->byte_count = cpu_to_be32(frag->size);
780 --data;
781 }
782
783 /* Map linear part */
784 if (tx_info->linear) {
785 dma = pci_map_single(mdev->dev->pdev, skb->data + lso_header_size,
786 skb_headlen(skb) - lso_header_size, PCI_DMA_TODEVICE);
787 data->addr = cpu_to_be64(dma);
788 data->lkey = cpu_to_be32(mdev->mr.key);
789 wmb();
790 data->byte_count = cpu_to_be32(skb_headlen(skb) - lso_header_size);
791 }
792 } else
793 build_inline_wqe(tx_desc, skb, real_size, &vlan_tag, tx_ind, fragptr);
794
795 ring->prod += nr_txbb;
796
797 /* If we used a bounce buffer then copy descriptor back into place */
798 if (tx_desc == (struct mlx4_en_tx_desc *) ring->bounce_buf)
799 tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size);
800
801 /* Run destructor before passing skb to HW */
802 if (likely(!skb_shared(skb)))
803 skb_orphan(skb);
804
805 /* Ensure new descirptor hits memory
806 * before setting ownership of this descriptor to HW */
807 wmb();
808 tx_desc->ctrl.owner_opcode = op_own;
809
810 /* Ring doorbell! */
811 wmb();
812 writel(ring->doorbell_qpn, mdev->uar_map + MLX4_SEND_DOORBELL);
813 dev->trans_start = jiffies;
814
815 /* Poll CQ here */
816 mlx4_en_xmit_poll(priv, tx_ind);
817
818 return 0;
819}
820
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index 8a8b56135a58..de169338cd90 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -558,7 +558,7 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
558 int i; 558 int i;
559 559
560 err = mlx4_bitmap_init(&priv->eq_table.bitmap, dev->caps.num_eqs, 560 err = mlx4_bitmap_init(&priv->eq_table.bitmap, dev->caps.num_eqs,
561 dev->caps.num_eqs - 1, dev->caps.reserved_eqs); 561 dev->caps.num_eqs - 1, dev->caps.reserved_eqs, 0);
562 if (err) 562 if (err)
563 return err; 563 return err;
564 564
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index 7e32955da982..be09fdb79cb8 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -88,6 +88,7 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u32 flags)
88 [ 8] = "P_Key violation counter", 88 [ 8] = "P_Key violation counter",
89 [ 9] = "Q_Key violation counter", 89 [ 9] = "Q_Key violation counter",
90 [10] = "VMM", 90 [10] = "VMM",
91 [12] = "DPDP",
91 [16] = "MW support", 92 [16] = "MW support",
92 [17] = "APM support", 93 [17] = "APM support",
93 [18] = "Atomic ops support", 94 [18] = "Atomic ops support",
@@ -346,7 +347,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
346 MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET); 347 MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
347 dev_cap->max_vl[i] = field >> 4; 348 dev_cap->max_vl[i] = field >> 4;
348 MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET); 349 MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET);
349 dev_cap->max_mtu[i] = field >> 4; 350 dev_cap->ib_mtu[i] = field >> 4;
350 dev_cap->max_port_width[i] = field & 0xf; 351 dev_cap->max_port_width[i] = field & 0xf;
351 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET); 352 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET);
352 dev_cap->max_gids[i] = 1 << (field & 0xf); 353 dev_cap->max_gids[i] = 1 << (field & 0xf);
@@ -354,9 +355,13 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
354 dev_cap->max_pkeys[i] = 1 << (field & 0xf); 355 dev_cap->max_pkeys[i] = 1 << (field & 0xf);
355 } 356 }
356 } else { 357 } else {
358#define QUERY_PORT_SUPPORTED_TYPE_OFFSET 0x00
357#define QUERY_PORT_MTU_OFFSET 0x01 359#define QUERY_PORT_MTU_OFFSET 0x01
360#define QUERY_PORT_ETH_MTU_OFFSET 0x02
358#define QUERY_PORT_WIDTH_OFFSET 0x06 361#define QUERY_PORT_WIDTH_OFFSET 0x06
359#define QUERY_PORT_MAX_GID_PKEY_OFFSET 0x07 362#define QUERY_PORT_MAX_GID_PKEY_OFFSET 0x07
363#define QUERY_PORT_MAC_OFFSET 0x08
364#define QUERY_PORT_MAX_MACVLAN_OFFSET 0x0a
360#define QUERY_PORT_MAX_VL_OFFSET 0x0b 365#define QUERY_PORT_MAX_VL_OFFSET 0x0b
361 366
362 for (i = 1; i <= dev_cap->num_ports; ++i) { 367 for (i = 1; i <= dev_cap->num_ports; ++i) {
@@ -365,8 +370,10 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
365 if (err) 370 if (err)
366 goto out; 371 goto out;
367 372
373 MLX4_GET(field, outbox, QUERY_PORT_SUPPORTED_TYPE_OFFSET);
374 dev_cap->supported_port_types[i] = field & 3;
368 MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET); 375 MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET);
369 dev_cap->max_mtu[i] = field & 0xf; 376 dev_cap->ib_mtu[i] = field & 0xf;
370 MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET); 377 MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET);
371 dev_cap->max_port_width[i] = field & 0xf; 378 dev_cap->max_port_width[i] = field & 0xf;
372 MLX4_GET(field, outbox, QUERY_PORT_MAX_GID_PKEY_OFFSET); 379 MLX4_GET(field, outbox, QUERY_PORT_MAX_GID_PKEY_OFFSET);
@@ -374,6 +381,11 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
374 dev_cap->max_pkeys[i] = 1 << (field & 0xf); 381 dev_cap->max_pkeys[i] = 1 << (field & 0xf);
375 MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET); 382 MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET);
376 dev_cap->max_vl[i] = field & 0xf; 383 dev_cap->max_vl[i] = field & 0xf;
384 MLX4_GET(field, outbox, QUERY_PORT_MAX_MACVLAN_OFFSET);
385 dev_cap->log_max_macs[i] = field & 0xf;
386 dev_cap->log_max_vlans[i] = field >> 4;
387 MLX4_GET(dev_cap->eth_mtu[i], outbox, QUERY_PORT_ETH_MTU_OFFSET);
388 MLX4_GET(dev_cap->def_mac[i], outbox, QUERY_PORT_MAC_OFFSET);
377 } 389 }
378 } 390 }
379 391
@@ -407,7 +419,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
407 mlx4_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n", 419 mlx4_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n",
408 dev_cap->max_cq_sz, dev_cap->max_qp_sz, dev_cap->max_srq_sz); 420 dev_cap->max_cq_sz, dev_cap->max_qp_sz, dev_cap->max_srq_sz);
409 mlx4_dbg(dev, "Local CA ACK delay: %d, max MTU: %d, port width cap: %d\n", 421 mlx4_dbg(dev, "Local CA ACK delay: %d, max MTU: %d, port width cap: %d\n",
410 dev_cap->local_ca_ack_delay, 128 << dev_cap->max_mtu[1], 422 dev_cap->local_ca_ack_delay, 128 << dev_cap->ib_mtu[1],
411 dev_cap->max_port_width[1]); 423 dev_cap->max_port_width[1]);
412 mlx4_dbg(dev, "Max SQ desc size: %d, max SQ S/G: %d\n", 424 mlx4_dbg(dev, "Max SQ desc size: %d, max SQ S/G: %d\n",
413 dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg); 425 dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg);
@@ -819,7 +831,7 @@ int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
819 flags |= (dev->caps.port_width_cap[port] & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT; 831 flags |= (dev->caps.port_width_cap[port] & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT;
820 MLX4_PUT(inbox, flags, INIT_PORT_FLAGS_OFFSET); 832 MLX4_PUT(inbox, flags, INIT_PORT_FLAGS_OFFSET);
821 833
822 field = 128 << dev->caps.mtu_cap[port]; 834 field = 128 << dev->caps.ib_mtu_cap[port];
823 MLX4_PUT(inbox, field, INIT_PORT_MTU_OFFSET); 835 MLX4_PUT(inbox, field, INIT_PORT_MTU_OFFSET);
824 field = dev->caps.gid_table_len[port]; 836 field = dev->caps.gid_table_len[port];
825 MLX4_PUT(inbox, field, INIT_PORT_MAX_GID_OFFSET); 837 MLX4_PUT(inbox, field, INIT_PORT_MAX_GID_OFFSET);
diff --git a/drivers/net/mlx4/fw.h b/drivers/net/mlx4/fw.h
index decbb5c2ad41..526d7f30c041 100644
--- a/drivers/net/mlx4/fw.h
+++ b/drivers/net/mlx4/fw.h
@@ -66,11 +66,13 @@ struct mlx4_dev_cap {
66 int local_ca_ack_delay; 66 int local_ca_ack_delay;
67 int num_ports; 67 int num_ports;
68 u32 max_msg_sz; 68 u32 max_msg_sz;
69 int max_mtu[MLX4_MAX_PORTS + 1]; 69 int ib_mtu[MLX4_MAX_PORTS + 1];
70 int max_port_width[MLX4_MAX_PORTS + 1]; 70 int max_port_width[MLX4_MAX_PORTS + 1];
71 int max_vl[MLX4_MAX_PORTS + 1]; 71 int max_vl[MLX4_MAX_PORTS + 1];
72 int max_gids[MLX4_MAX_PORTS + 1]; 72 int max_gids[MLX4_MAX_PORTS + 1];
73 int max_pkeys[MLX4_MAX_PORTS + 1]; 73 int max_pkeys[MLX4_MAX_PORTS + 1];
74 u64 def_mac[MLX4_MAX_PORTS + 1];
75 u16 eth_mtu[MLX4_MAX_PORTS + 1];
74 u16 stat_rate_support; 76 u16 stat_rate_support;
75 u32 flags; 77 u32 flags;
76 int reserved_uars; 78 int reserved_uars;
@@ -102,6 +104,9 @@ struct mlx4_dev_cap {
102 u32 reserved_lkey; 104 u32 reserved_lkey;
103 u64 max_icm_sz; 105 u64 max_icm_sz;
104 int max_gso_sz; 106 int max_gso_sz;
107 u8 supported_port_types[MLX4_MAX_PORTS + 1];
108 u8 log_max_macs[MLX4_MAX_PORTS + 1];
109 u8 log_max_vlans[MLX4_MAX_PORTS + 1];
105}; 110};
106 111
107struct mlx4_adapter { 112struct mlx4_adapter {
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 1252a919de2e..468921b8f4b6 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -85,6 +85,57 @@ static struct mlx4_profile default_profile = {
85 .num_mtt = 1 << 20, 85 .num_mtt = 1 << 20,
86}; 86};
87 87
88static int log_num_mac = 2;
89module_param_named(log_num_mac, log_num_mac, int, 0444);
90MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");
91
92static int log_num_vlan;
93module_param_named(log_num_vlan, log_num_vlan, int, 0444);
94MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
95
96static int use_prio;
97module_param_named(use_prio, use_prio, bool, 0444);
98MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
99 "(0/1, default 0)");
100
101static int mlx4_check_port_params(struct mlx4_dev *dev,
102 enum mlx4_port_type *port_type)
103{
104 int i;
105
106 for (i = 0; i < dev->caps.num_ports - 1; i++) {
107 if (port_type[i] != port_type[i+1] &&
108 !(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
109 mlx4_err(dev, "Only same port types supported "
110 "on this HCA, aborting.\n");
111 return -EINVAL;
112 }
113 }
114 if ((port_type[0] == MLX4_PORT_TYPE_ETH) &&
115 (port_type[1] == MLX4_PORT_TYPE_IB)) {
116 mlx4_err(dev, "eth-ib configuration is not supported.\n");
117 return -EINVAL;
118 }
119
120 for (i = 0; i < dev->caps.num_ports; i++) {
121 if (!(port_type[i] & dev->caps.supported_type[i+1])) {
122 mlx4_err(dev, "Requested port type for port %d is not "
123 "supported on this HCA\n", i + 1);
124 return -EINVAL;
125 }
126 }
127 return 0;
128}
129
130static void mlx4_set_port_mask(struct mlx4_dev *dev)
131{
132 int i;
133
134 dev->caps.port_mask = 0;
135 for (i = 1; i <= dev->caps.num_ports; ++i)
136 if (dev->caps.port_type[i] == MLX4_PORT_TYPE_IB)
137 dev->caps.port_mask |= 1 << (i - 1);
138}
88static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) 139static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
89{ 140{
90 int err; 141 int err;
@@ -120,10 +171,13 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
120 dev->caps.num_ports = dev_cap->num_ports; 171 dev->caps.num_ports = dev_cap->num_ports;
121 for (i = 1; i <= dev->caps.num_ports; ++i) { 172 for (i = 1; i <= dev->caps.num_ports; ++i) {
122 dev->caps.vl_cap[i] = dev_cap->max_vl[i]; 173 dev->caps.vl_cap[i] = dev_cap->max_vl[i];
123 dev->caps.mtu_cap[i] = dev_cap->max_mtu[i]; 174 dev->caps.ib_mtu_cap[i] = dev_cap->ib_mtu[i];
124 dev->caps.gid_table_len[i] = dev_cap->max_gids[i]; 175 dev->caps.gid_table_len[i] = dev_cap->max_gids[i];
125 dev->caps.pkey_table_len[i] = dev_cap->max_pkeys[i]; 176 dev->caps.pkey_table_len[i] = dev_cap->max_pkeys[i];
126 dev->caps.port_width_cap[i] = dev_cap->max_port_width[i]; 177 dev->caps.port_width_cap[i] = dev_cap->max_port_width[i];
178 dev->caps.eth_mtu_cap[i] = dev_cap->eth_mtu[i];
179 dev->caps.def_mac[i] = dev_cap->def_mac[i];
180 dev->caps.supported_type[i] = dev_cap->supported_port_types[i];
127 } 181 }
128 182
129 dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE; 183 dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE;
@@ -134,7 +188,6 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
134 dev->caps.max_rq_sg = dev_cap->max_rq_sg; 188 dev->caps.max_rq_sg = dev_cap->max_rq_sg;
135 dev->caps.max_wqes = dev_cap->max_qp_sz; 189 dev->caps.max_wqes = dev_cap->max_qp_sz;
136 dev->caps.max_qp_init_rdma = dev_cap->max_requester_per_qp; 190 dev->caps.max_qp_init_rdma = dev_cap->max_requester_per_qp;
137 dev->caps.reserved_qps = dev_cap->reserved_qps;
138 dev->caps.max_srq_wqes = dev_cap->max_srq_sz; 191 dev->caps.max_srq_wqes = dev_cap->max_srq_sz;
139 dev->caps.max_srq_sge = dev_cap->max_rq_sg - 1; 192 dev->caps.max_srq_sge = dev_cap->max_rq_sg - 1;
140 dev->caps.reserved_srqs = dev_cap->reserved_srqs; 193 dev->caps.reserved_srqs = dev_cap->reserved_srqs;
@@ -163,9 +216,138 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
163 dev->caps.stat_rate_support = dev_cap->stat_rate_support; 216 dev->caps.stat_rate_support = dev_cap->stat_rate_support;
164 dev->caps.max_gso_sz = dev_cap->max_gso_sz; 217 dev->caps.max_gso_sz = dev_cap->max_gso_sz;
165 218
219 dev->caps.log_num_macs = log_num_mac;
220 dev->caps.log_num_vlans = log_num_vlan;
221 dev->caps.log_num_prios = use_prio ? 3 : 0;
222
223 for (i = 1; i <= dev->caps.num_ports; ++i) {
224 if (dev->caps.supported_type[i] != MLX4_PORT_TYPE_ETH)
225 dev->caps.port_type[i] = MLX4_PORT_TYPE_IB;
226 else
227 dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
228
229 if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) {
230 dev->caps.log_num_macs = dev_cap->log_max_macs[i];
231 mlx4_warn(dev, "Requested number of MACs is too much "
232 "for port %d, reducing to %d.\n",
233 i, 1 << dev->caps.log_num_macs);
234 }
235 if (dev->caps.log_num_vlans > dev_cap->log_max_vlans[i]) {
236 dev->caps.log_num_vlans = dev_cap->log_max_vlans[i];
237 mlx4_warn(dev, "Requested number of VLANs is too much "
238 "for port %d, reducing to %d.\n",
239 i, 1 << dev->caps.log_num_vlans);
240 }
241 }
242
243 mlx4_set_port_mask(dev);
244
245 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
246 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
247 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
248 (1 << dev->caps.log_num_macs) *
249 (1 << dev->caps.log_num_vlans) *
250 (1 << dev->caps.log_num_prios) *
251 dev->caps.num_ports;
252 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;
253
254 dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] +
255 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] +
256 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] +
257 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];
258
166 return 0; 259 return 0;
167} 260}
168 261
262/*
263 * Change the port configuration of the device.
264 * Every user of this function must hold the port mutex.
265 */
266static int mlx4_change_port_types(struct mlx4_dev *dev,
267 enum mlx4_port_type *port_types)
268{
269 int err = 0;
270 int change = 0;
271 int port;
272
273 for (port = 0; port < dev->caps.num_ports; port++) {
274 if (port_types[port] != dev->caps.port_type[port + 1]) {
275 change = 1;
276 dev->caps.port_type[port + 1] = port_types[port];
277 }
278 }
279 if (change) {
280 mlx4_unregister_device(dev);
281 for (port = 1; port <= dev->caps.num_ports; port++) {
282 mlx4_CLOSE_PORT(dev, port);
283 err = mlx4_SET_PORT(dev, port);
284 if (err) {
285 mlx4_err(dev, "Failed to set port %d, "
286 "aborting\n", port);
287 goto out;
288 }
289 }
290 mlx4_set_port_mask(dev);
291 err = mlx4_register_device(dev);
292 }
293
294out:
295 return err;
296}
297
298static ssize_t show_port_type(struct device *dev,
299 struct device_attribute *attr,
300 char *buf)
301{
302 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
303 port_attr);
304 struct mlx4_dev *mdev = info->dev;
305
306 return sprintf(buf, "%s\n",
307 mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB ?
308 "ib" : "eth");
309}
310
311static ssize_t set_port_type(struct device *dev,
312 struct device_attribute *attr,
313 const char *buf, size_t count)
314{
315 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
316 port_attr);
317 struct mlx4_dev *mdev = info->dev;
318 struct mlx4_priv *priv = mlx4_priv(mdev);
319 enum mlx4_port_type types[MLX4_MAX_PORTS];
320 int i;
321 int err = 0;
322
323 if (!strcmp(buf, "ib\n"))
324 info->tmp_type = MLX4_PORT_TYPE_IB;
325 else if (!strcmp(buf, "eth\n"))
326 info->tmp_type = MLX4_PORT_TYPE_ETH;
327 else {
328 mlx4_err(mdev, "%s is not supported port type\n", buf);
329 return -EINVAL;
330 }
331
332 mutex_lock(&priv->port_mutex);
333 for (i = 0; i < mdev->caps.num_ports; i++)
334 types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type :
335 mdev->caps.port_type[i+1];
336
337 err = mlx4_check_port_params(mdev, types);
338 if (err)
339 goto out;
340
341 for (i = 1; i <= mdev->caps.num_ports; i++)
342 priv->port[i].tmp_type = 0;
343
344 err = mlx4_change_port_types(mdev, types);
345
346out:
347 mutex_unlock(&priv->port_mutex);
348 return err ? err : count;
349}
350
169static int mlx4_load_fw(struct mlx4_dev *dev) 351static int mlx4_load_fw(struct mlx4_dev *dev)
170{ 352{
171 struct mlx4_priv *priv = mlx4_priv(dev); 353 struct mlx4_priv *priv = mlx4_priv(dev);
@@ -211,7 +393,8 @@ static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
211 ((u64) (MLX4_CMPT_TYPE_QP * 393 ((u64) (MLX4_CMPT_TYPE_QP *
212 cmpt_entry_sz) << MLX4_CMPT_SHIFT), 394 cmpt_entry_sz) << MLX4_CMPT_SHIFT),
213 cmpt_entry_sz, dev->caps.num_qps, 395 cmpt_entry_sz, dev->caps.num_qps,
214 dev->caps.reserved_qps, 0, 0); 396 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
397 0, 0);
215 if (err) 398 if (err)
216 goto err; 399 goto err;
217 400
@@ -336,7 +519,8 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
336 init_hca->qpc_base, 519 init_hca->qpc_base,
337 dev_cap->qpc_entry_sz, 520 dev_cap->qpc_entry_sz,
338 dev->caps.num_qps, 521 dev->caps.num_qps,
339 dev->caps.reserved_qps, 0, 0); 522 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
523 0, 0);
340 if (err) { 524 if (err) {
341 mlx4_err(dev, "Failed to map QP context memory, aborting.\n"); 525 mlx4_err(dev, "Failed to map QP context memory, aborting.\n");
342 goto err_unmap_dmpt; 526 goto err_unmap_dmpt;
@@ -346,7 +530,8 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
346 init_hca->auxc_base, 530 init_hca->auxc_base,
347 dev_cap->aux_entry_sz, 531 dev_cap->aux_entry_sz,
348 dev->caps.num_qps, 532 dev->caps.num_qps,
349 dev->caps.reserved_qps, 0, 0); 533 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
534 0, 0);
350 if (err) { 535 if (err) {
351 mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n"); 536 mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n");
352 goto err_unmap_qp; 537 goto err_unmap_qp;
@@ -356,7 +541,8 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
356 init_hca->altc_base, 541 init_hca->altc_base,
357 dev_cap->altc_entry_sz, 542 dev_cap->altc_entry_sz,
358 dev->caps.num_qps, 543 dev->caps.num_qps,
359 dev->caps.reserved_qps, 0, 0); 544 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
545 0, 0);
360 if (err) { 546 if (err) {
361 mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n"); 547 mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n");
362 goto err_unmap_auxc; 548 goto err_unmap_auxc;
@@ -366,7 +552,8 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
366 init_hca->rdmarc_base, 552 init_hca->rdmarc_base,
367 dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift, 553 dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift,
368 dev->caps.num_qps, 554 dev->caps.num_qps,
369 dev->caps.reserved_qps, 0, 0); 555 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
556 0, 0);
370 if (err) { 557 if (err) {
371 mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n"); 558 mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n");
372 goto err_unmap_altc; 559 goto err_unmap_altc;
@@ -565,6 +752,7 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
565{ 752{
566 struct mlx4_priv *priv = mlx4_priv(dev); 753 struct mlx4_priv *priv = mlx4_priv(dev);
567 int err; 754 int err;
755 int port;
568 756
569 err = mlx4_init_uar_table(dev); 757 err = mlx4_init_uar_table(dev);
570 if (err) { 758 if (err) {
@@ -663,8 +851,20 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
663 goto err_qp_table_free; 851 goto err_qp_table_free;
664 } 852 }
665 853
854 for (port = 1; port <= dev->caps.num_ports; port++) {
855 err = mlx4_SET_PORT(dev, port);
856 if (err) {
857 mlx4_err(dev, "Failed to set port %d, aborting\n",
858 port);
859 goto err_mcg_table_free;
860 }
861 }
862
666 return 0; 863 return 0;
667 864
865err_mcg_table_free:
866 mlx4_cleanup_mcg_table(dev);
867
668err_qp_table_free: 868err_qp_table_free:
669 mlx4_cleanup_qp_table(dev); 869 mlx4_cleanup_qp_table(dev);
670 870
@@ -728,11 +928,45 @@ no_msi:
728 priv->eq_table.eq[i].irq = dev->pdev->irq; 928 priv->eq_table.eq[i].irq = dev->pdev->irq;
729} 929}
730 930
931static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
932{
933 struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
934 int err = 0;
935
936 info->dev = dev;
937 info->port = port;
938 mlx4_init_mac_table(dev, &info->mac_table);
939 mlx4_init_vlan_table(dev, &info->vlan_table);
940
941 sprintf(info->dev_name, "mlx4_port%d", port);
942 info->port_attr.attr.name = info->dev_name;
943 info->port_attr.attr.mode = S_IRUGO | S_IWUSR;
944 info->port_attr.show = show_port_type;
945 info->port_attr.store = set_port_type;
946
947 err = device_create_file(&dev->pdev->dev, &info->port_attr);
948 if (err) {
949 mlx4_err(dev, "Failed to create file for port %d\n", port);
950 info->port = -1;
951 }
952
953 return err;
954}
955
956static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
957{
958 if (info->port < 0)
959 return;
960
961 device_remove_file(&info->dev->pdev->dev, &info->port_attr);
962}
963
731static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) 964static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
732{ 965{
733 struct mlx4_priv *priv; 966 struct mlx4_priv *priv;
734 struct mlx4_dev *dev; 967 struct mlx4_dev *dev;
735 int err; 968 int err;
969 int port;
736 970
737 printk(KERN_INFO PFX "Initializing %s\n", 971 printk(KERN_INFO PFX "Initializing %s\n",
738 pci_name(pdev)); 972 pci_name(pdev));
@@ -807,6 +1041,8 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
807 INIT_LIST_HEAD(&priv->ctx_list); 1041 INIT_LIST_HEAD(&priv->ctx_list);
808 spin_lock_init(&priv->ctx_lock); 1042 spin_lock_init(&priv->ctx_lock);
809 1043
1044 mutex_init(&priv->port_mutex);
1045
810 INIT_LIST_HEAD(&priv->pgdir_list); 1046 INIT_LIST_HEAD(&priv->pgdir_list);
811 mutex_init(&priv->pgdir_mutex); 1047 mutex_init(&priv->pgdir_mutex);
812 1048
@@ -842,15 +1078,24 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
842 if (err) 1078 if (err)
843 goto err_close; 1079 goto err_close;
844 1080
1081 for (port = 1; port <= dev->caps.num_ports; port++) {
1082 err = mlx4_init_port_info(dev, port);
1083 if (err)
1084 goto err_port;
1085 }
1086
845 err = mlx4_register_device(dev); 1087 err = mlx4_register_device(dev);
846 if (err) 1088 if (err)
847 goto err_cleanup; 1089 goto err_port;
848 1090
849 pci_set_drvdata(pdev, dev); 1091 pci_set_drvdata(pdev, dev);
850 1092
851 return 0; 1093 return 0;
852 1094
853err_cleanup: 1095err_port:
1096 for (port = 1; port <= dev->caps.num_ports; port++)
1097 mlx4_cleanup_port_info(&priv->port[port]);
1098
854 mlx4_cleanup_mcg_table(dev); 1099 mlx4_cleanup_mcg_table(dev);
855 mlx4_cleanup_qp_table(dev); 1100 mlx4_cleanup_qp_table(dev);
856 mlx4_cleanup_srq_table(dev); 1101 mlx4_cleanup_srq_table(dev);
@@ -907,8 +1152,10 @@ static void mlx4_remove_one(struct pci_dev *pdev)
907 if (dev) { 1152 if (dev) {
908 mlx4_unregister_device(dev); 1153 mlx4_unregister_device(dev);
909 1154
910 for (p = 1; p <= dev->caps.num_ports; ++p) 1155 for (p = 1; p <= dev->caps.num_ports; p++) {
1156 mlx4_cleanup_port_info(&priv->port[p]);
911 mlx4_CLOSE_PORT(dev, p); 1157 mlx4_CLOSE_PORT(dev, p);
1158 }
912 1159
913 mlx4_cleanup_mcg_table(dev); 1160 mlx4_cleanup_mcg_table(dev);
914 mlx4_cleanup_qp_table(dev); 1161 mlx4_cleanup_qp_table(dev);
@@ -948,6 +1195,8 @@ static struct pci_device_id mlx4_pci_table[] = {
948 { PCI_VDEVICE(MELLANOX, 0x6354) }, /* MT25408 "Hermon" QDR */ 1195 { PCI_VDEVICE(MELLANOX, 0x6354) }, /* MT25408 "Hermon" QDR */
949 { PCI_VDEVICE(MELLANOX, 0x6732) }, /* MT25408 "Hermon" DDR PCIe gen2 */ 1196 { PCI_VDEVICE(MELLANOX, 0x6732) }, /* MT25408 "Hermon" DDR PCIe gen2 */
950 { PCI_VDEVICE(MELLANOX, 0x673c) }, /* MT25408 "Hermon" QDR PCIe gen2 */ 1197 { PCI_VDEVICE(MELLANOX, 0x673c) }, /* MT25408 "Hermon" QDR PCIe gen2 */
1198 { PCI_VDEVICE(MELLANOX, 0x6368) }, /* MT25408 "Hermon" EN 10GigE */
1199 { PCI_VDEVICE(MELLANOX, 0x6750) }, /* MT25408 "Hermon" EN 10GigE PCIe gen2 */
951 { 0, } 1200 { 0, }
952}; 1201};
953 1202
@@ -960,10 +1209,28 @@ static struct pci_driver mlx4_driver = {
960 .remove = __devexit_p(mlx4_remove_one) 1209 .remove = __devexit_p(mlx4_remove_one)
961}; 1210};
962 1211
1212static int __init mlx4_verify_params(void)
1213{
1214 if ((log_num_mac < 0) || (log_num_mac > 7)) {
1215 printk(KERN_WARNING "mlx4_core: bad num_mac: %d\n", log_num_mac);
1216 return -1;
1217 }
1218
1219 if ((log_num_vlan < 0) || (log_num_vlan > 7)) {
1220 printk(KERN_WARNING "mlx4_core: bad num_vlan: %d\n", log_num_vlan);
1221 return -1;
1222 }
1223
1224 return 0;
1225}
1226
963static int __init mlx4_init(void) 1227static int __init mlx4_init(void)
964{ 1228{
965 int ret; 1229 int ret;
966 1230
1231 if (mlx4_verify_params())
1232 return -EINVAL;
1233
967 ret = mlx4_catas_init(); 1234 ret = mlx4_catas_init();
968 if (ret) 1235 if (ret)
969 return ret; 1236 return ret;
diff --git a/drivers/net/mlx4/mcg.c b/drivers/net/mlx4/mcg.c
index c83f88ce0736..592c01ae2c5d 100644
--- a/drivers/net/mlx4/mcg.c
+++ b/drivers/net/mlx4/mcg.c
@@ -368,8 +368,8 @@ int mlx4_init_mcg_table(struct mlx4_dev *dev)
368 struct mlx4_priv *priv = mlx4_priv(dev); 368 struct mlx4_priv *priv = mlx4_priv(dev);
369 int err; 369 int err;
370 370
371 err = mlx4_bitmap_init(&priv->mcg_table.bitmap, 371 err = mlx4_bitmap_init(&priv->mcg_table.bitmap, dev->caps.num_amgms,
372 dev->caps.num_amgms, dev->caps.num_amgms - 1, 0); 372 dev->caps.num_amgms - 1, 0, 0);
373 if (err) 373 if (err)
374 return err; 374 return err;
375 375
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index 5337e3ac3e78..fa431fad0eec 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -111,6 +111,7 @@ struct mlx4_bitmap {
111 u32 last; 111 u32 last;
112 u32 top; 112 u32 top;
113 u32 max; 113 u32 max;
114 u32 reserved_top;
114 u32 mask; 115 u32 mask;
115 spinlock_t lock; 116 spinlock_t lock;
116 unsigned long *table; 117 unsigned long *table;
@@ -251,6 +252,38 @@ struct mlx4_catas_err {
251 struct list_head list; 252 struct list_head list;
252}; 253};
253 254
255#define MLX4_MAX_MAC_NUM 128
256#define MLX4_MAC_TABLE_SIZE (MLX4_MAX_MAC_NUM << 3)
257
258struct mlx4_mac_table {
259 __be64 entries[MLX4_MAX_MAC_NUM];
260 int refs[MLX4_MAX_MAC_NUM];
261 struct mutex mutex;
262 int total;
263 int max;
264};
265
266#define MLX4_MAX_VLAN_NUM 128
267#define MLX4_VLAN_TABLE_SIZE (MLX4_MAX_VLAN_NUM << 2)
268
269struct mlx4_vlan_table {
270 __be32 entries[MLX4_MAX_VLAN_NUM];
271 int refs[MLX4_MAX_VLAN_NUM];
272 struct mutex mutex;
273 int total;
274 int max;
275};
276
277struct mlx4_port_info {
278 struct mlx4_dev *dev;
279 int port;
280 char dev_name[16];
281 struct device_attribute port_attr;
282 enum mlx4_port_type tmp_type;
283 struct mlx4_mac_table mac_table;
284 struct mlx4_vlan_table vlan_table;
285};
286
254struct mlx4_priv { 287struct mlx4_priv {
255 struct mlx4_dev dev; 288 struct mlx4_dev dev;
256 289
@@ -279,6 +312,8 @@ struct mlx4_priv {
279 312
280 struct mlx4_uar driver_uar; 313 struct mlx4_uar driver_uar;
281 void __iomem *kar; 314 void __iomem *kar;
315 struct mlx4_port_info port[MLX4_MAX_PORTS + 1];
316 struct mutex port_mutex;
282}; 317};
283 318
284static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev) 319static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
@@ -288,7 +323,10 @@ static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
288 323
289u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap); 324u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap);
290void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj); 325void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj);
291int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, u32 reserved); 326u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align);
327void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt);
328int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
329 u32 reserved_bot, u32 resetrved_top);
292void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap); 330void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap);
293 331
294int mlx4_reset(struct mlx4_dev *dev); 332int mlx4_reset(struct mlx4_dev *dev);
@@ -346,4 +384,9 @@ void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type);
346 384
347void mlx4_handle_catas_err(struct mlx4_dev *dev); 385void mlx4_handle_catas_err(struct mlx4_dev *dev);
348 386
387void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table);
388void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table);
389
390int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port);
391
349#endif /* MLX4_H */ 392#endif /* MLX4_H */
diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
new file mode 100644
index 000000000000..11fb17c6e97b
--- /dev/null
+++ b/drivers/net/mlx4/mlx4_en.h
@@ -0,0 +1,561 @@
1/*
2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#ifndef _MLX4_EN_H_
35#define _MLX4_EN_H_
36
37#include <linux/compiler.h>
38#include <linux/list.h>
39#include <linux/mutex.h>
40#include <linux/netdevice.h>
41#include <linux/inet_lro.h>
42
43#include <linux/mlx4/device.h>
44#include <linux/mlx4/qp.h>
45#include <linux/mlx4/cq.h>
46#include <linux/mlx4/srq.h>
47#include <linux/mlx4/doorbell.h>
48
49#include "en_port.h"
50
51#define DRV_NAME "mlx4_en"
52#define DRV_VERSION "1.4.0"
53#define DRV_RELDATE "Sep 2008"
54
55
56#define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN)
57
58#define mlx4_dbg(mlevel, priv, format, arg...) \
59 if (NETIF_MSG_##mlevel & priv->msg_enable) \
60 printk(KERN_DEBUG "%s %s: " format , DRV_NAME ,\
61 (&priv->mdev->pdev->dev)->bus_id , ## arg)
62
63#define mlx4_err(mdev, format, arg...) \
64 printk(KERN_ERR "%s %s: " format , DRV_NAME ,\
65 (&mdev->pdev->dev)->bus_id , ## arg)
66#define mlx4_info(mdev, format, arg...) \
67 printk(KERN_INFO "%s %s: " format , DRV_NAME ,\
68 (&mdev->pdev->dev)->bus_id , ## arg)
69#define mlx4_warn(mdev, format, arg...) \
70 printk(KERN_WARNING "%s %s: " format , DRV_NAME ,\
71 (&mdev->pdev->dev)->bus_id , ## arg)
72
73/*
74 * Device constants
75 */
76
77
78#define MLX4_EN_PAGE_SHIFT 12
79#define MLX4_EN_PAGE_SIZE (1 << MLX4_EN_PAGE_SHIFT)
80#define MAX_TX_RINGS 16
81#define MAX_RX_RINGS 16
82#define MAX_RSS_MAP_SIZE 64
83#define RSS_FACTOR 2
84#define TXBB_SIZE 64
85#define HEADROOM (2048 / TXBB_SIZE + 1)
86#define MAX_LSO_HDR_SIZE 92
87#define STAMP_STRIDE 64
88#define STAMP_DWORDS (STAMP_STRIDE / 4)
89#define STAMP_SHIFT 31
90#define STAMP_VAL 0x7fffffff
91#define STATS_DELAY (HZ / 4)
92
93/* Typical TSO descriptor with 16 gather entries is 352 bytes... */
94#define MAX_DESC_SIZE 512
95#define MAX_DESC_TXBBS (MAX_DESC_SIZE / TXBB_SIZE)
96
97/*
98 * OS related constants and tunables
99 */
100
101#define MLX4_EN_WATCHDOG_TIMEOUT (15 * HZ)
102
103#define MLX4_EN_ALLOC_ORDER 2
104#define MLX4_EN_ALLOC_SIZE (PAGE_SIZE << MLX4_EN_ALLOC_ORDER)
105
106#define MLX4_EN_MAX_LRO_DESCRIPTORS 32
107
108/* Receive fragment sizes; we use at most 4 fragments (for 9600 byte MTU
109 * and 4K allocations) */
110enum {
111 FRAG_SZ0 = 512 - NET_IP_ALIGN,
112 FRAG_SZ1 = 1024,
113 FRAG_SZ2 = 4096,
114 FRAG_SZ3 = MLX4_EN_ALLOC_SIZE
115};
116#define MLX4_EN_MAX_RX_FRAGS 4
117
118/* Minimum ring size for our page-allocation sceme to work */
119#define MLX4_EN_MIN_RX_SIZE (MLX4_EN_ALLOC_SIZE / SMP_CACHE_BYTES)
120#define MLX4_EN_MIN_TX_SIZE (4096 / TXBB_SIZE)
121
122#define MLX4_EN_TX_RING_NUM 9
123#define MLX4_EN_DEF_TX_RING_SIZE 1024
124#define MLX4_EN_DEF_RX_RING_SIZE 1024
125
126/* Target number of bytes to coalesce with interrupt moderation */
127#define MLX4_EN_RX_COAL_TARGET 0x20000
128#define MLX4_EN_RX_COAL_TIME 0x10
129
130#define MLX4_EN_TX_COAL_PKTS 5
131#define MLX4_EN_TX_COAL_TIME 0x80
132
133#define MLX4_EN_RX_RATE_LOW 400000
134#define MLX4_EN_RX_COAL_TIME_LOW 0
135#define MLX4_EN_RX_RATE_HIGH 450000
136#define MLX4_EN_RX_COAL_TIME_HIGH 128
137#define MLX4_EN_RX_SIZE_THRESH 1024
138#define MLX4_EN_RX_RATE_THRESH (1000000 / MLX4_EN_RX_COAL_TIME_HIGH)
139#define MLX4_EN_SAMPLE_INTERVAL 0
140
141#define MLX4_EN_AUTO_CONF 0xffff
142
143#define MLX4_EN_DEF_RX_PAUSE 1
144#define MLX4_EN_DEF_TX_PAUSE 1
145
146/* Interval between sucessive polls in the Tx routine when polling is used
147 instead of interrupts (in per-core Tx rings) - should be power of 2 */
148#define MLX4_EN_TX_POLL_MODER 16
149#define MLX4_EN_TX_POLL_TIMEOUT (HZ / 4)
150
151#define ETH_LLC_SNAP_SIZE 8
152
153#define SMALL_PACKET_SIZE (256 - NET_IP_ALIGN)
154#define HEADER_COPY_SIZE (128 - NET_IP_ALIGN)
155
156#define MLX4_EN_MIN_MTU 46
157#define ETH_BCAST 0xffffffffffffULL
158
159#ifdef MLX4_EN_PERF_STAT
160/* Number of samples to 'average' */
161#define AVG_SIZE 128
162#define AVG_FACTOR 1024
163#define NUM_PERF_STATS NUM_PERF_COUNTERS
164
165#define INC_PERF_COUNTER(cnt) (++(cnt))
166#define ADD_PERF_COUNTER(cnt, add) ((cnt) += (add))
167#define AVG_PERF_COUNTER(cnt, sample) \
168 ((cnt) = ((cnt) * (AVG_SIZE - 1) + (sample) * AVG_FACTOR) / AVG_SIZE)
169#define GET_PERF_COUNTER(cnt) (cnt)
170#define GET_AVG_PERF_COUNTER(cnt) ((cnt) / AVG_FACTOR)
171
172#else
173
174#define NUM_PERF_STATS 0
175#define INC_PERF_COUNTER(cnt) do {} while (0)
176#define ADD_PERF_COUNTER(cnt, add) do {} while (0)
177#define AVG_PERF_COUNTER(cnt, sample) do {} while (0)
178#define GET_PERF_COUNTER(cnt) (0)
179#define GET_AVG_PERF_COUNTER(cnt) (0)
180#endif /* MLX4_EN_PERF_STAT */
181
182/*
183 * Configurables
184 */
185
186enum cq_type {
187 RX = 0,
188 TX = 1,
189};
190
191
192/*
193 * Useful macros
194 */
195#define ROUNDUP_LOG2(x) ilog2(roundup_pow_of_two(x))
196#define XNOR(x, y) (!(x) == !(y))
197#define ILLEGAL_MAC(addr) (addr == 0xffffffffffffULL || addr == 0x0)
198
199
200struct mlx4_en_tx_info {
201 struct sk_buff *skb;
202 u32 nr_txbb;
203 u8 linear;
204 u8 data_offset;
205};
206
207
208#define MLX4_EN_BIT_DESC_OWN 0x80000000
209#define CTRL_SIZE sizeof(struct mlx4_wqe_ctrl_seg)
210#define MLX4_EN_MEMTYPE_PAD 0x100
211#define DS_SIZE sizeof(struct mlx4_wqe_data_seg)
212
213
214struct mlx4_en_tx_desc {
215 struct mlx4_wqe_ctrl_seg ctrl;
216 union {
217 struct mlx4_wqe_data_seg data; /* at least one data segment */
218 struct mlx4_wqe_lso_seg lso;
219 struct mlx4_wqe_inline_seg inl;
220 };
221};
222
223#define MLX4_EN_USE_SRQ 0x01000000
224
225struct mlx4_en_rx_alloc {
226 struct page *page;
227 u16 offset;
228};
229
230struct mlx4_en_tx_ring {
231 struct mlx4_hwq_resources wqres;
232 u32 size ; /* number of TXBBs */
233 u32 size_mask;
234 u16 stride;
235 u16 cqn; /* index of port CQ associated with this ring */
236 u32 prod;
237 u32 cons;
238 u32 buf_size;
239 u32 doorbell_qpn;
240 void *buf;
241 u16 poll_cnt;
242 int blocked;
243 struct mlx4_en_tx_info *tx_info;
244 u8 *bounce_buf;
245 u32 last_nr_txbb;
246 struct mlx4_qp qp;
247 struct mlx4_qp_context context;
248 int qpn;
249 enum mlx4_qp_state qp_state;
250 struct mlx4_srq dummy;
251 unsigned long bytes;
252 unsigned long packets;
253 spinlock_t comp_lock;
254};
255
256struct mlx4_en_rx_desc {
257 struct mlx4_wqe_srq_next_seg next;
258 /* actual number of entries depends on rx ring stride */
259 struct mlx4_wqe_data_seg data[0];
260};
261
262struct mlx4_en_rx_ring {
263 struct mlx4_srq srq;
264 struct mlx4_hwq_resources wqres;
265 struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
266 struct net_lro_mgr lro;
267 u32 size ; /* number of Rx descs*/
268 u32 actual_size;
269 u32 size_mask;
270 u16 stride;
271 u16 log_stride;
272 u16 cqn; /* index of port CQ associated with this ring */
273 u32 prod;
274 u32 cons;
275 u32 buf_size;
276 int need_refill;
277 int full;
278 void *buf;
279 void *rx_info;
280 unsigned long bytes;
281 unsigned long packets;
282};
283
284
285static inline int mlx4_en_can_lro(__be16 status)
286{
287 return (status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
288 MLX4_CQE_STATUS_IPV4F |
289 MLX4_CQE_STATUS_IPV6 |
290 MLX4_CQE_STATUS_IPV4OPT |
291 MLX4_CQE_STATUS_TCP |
292 MLX4_CQE_STATUS_UDP |
293 MLX4_CQE_STATUS_IPOK)) ==
294 cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
295 MLX4_CQE_STATUS_IPOK |
296 MLX4_CQE_STATUS_TCP);
297}
298
299struct mlx4_en_cq {
300 struct mlx4_cq mcq;
301 struct mlx4_hwq_resources wqres;
302 int ring;
303 spinlock_t lock;
304 struct net_device *dev;
305 struct napi_struct napi;
306 /* Per-core Tx cq processing support */
307 struct timer_list timer;
308 int size;
309 int buf_size;
310 unsigned vector;
311 enum cq_type is_tx;
312 u16 moder_time;
313 u16 moder_cnt;
314 int armed;
315 struct mlx4_cqe *buf;
316#define MLX4_EN_OPCODE_ERROR 0x1e
317};
318
319struct mlx4_en_port_profile {
320 u32 flags;
321 u32 tx_ring_num;
322 u32 rx_ring_num;
323 u32 tx_ring_size;
324 u32 rx_ring_size;
325};
326
327struct mlx4_en_profile {
328 int rss_xor;
329 int num_lro;
330 u8 rss_mask;
331 u32 active_ports;
332 u32 small_pkt_int;
333 int rx_moder_cnt;
334 int rx_moder_time;
335 int auto_moder;
336 u8 rx_pause;
337 u8 rx_ppp;
338 u8 tx_pause;
339 u8 tx_ppp;
340 u8 no_reset;
341 struct mlx4_en_port_profile prof[MLX4_MAX_PORTS + 1];
342};
343
344struct mlx4_en_dev {
345 struct mlx4_dev *dev;
346 struct pci_dev *pdev;
347 struct mutex state_lock;
348 struct net_device *pndev[MLX4_MAX_PORTS + 1];
349 u32 port_cnt;
350 bool device_up;
351 struct mlx4_en_profile profile;
352 u32 LSO_support;
353 struct workqueue_struct *workqueue;
354 struct device *dma_device;
355 void __iomem *uar_map;
356 struct mlx4_uar priv_uar;
357 struct mlx4_mr mr;
358 u32 priv_pdn;
359 spinlock_t uar_lock;
360};
361
362
363struct mlx4_en_rss_map {
364 int size;
365 int base_qpn;
366 u16 map[MAX_RSS_MAP_SIZE];
367 struct mlx4_qp qps[MAX_RSS_MAP_SIZE];
368 enum mlx4_qp_state state[MAX_RSS_MAP_SIZE];
369 struct mlx4_qp indir_qp;
370 enum mlx4_qp_state indir_state;
371};
372
373struct mlx4_en_rss_context {
374 __be32 base_qpn;
375 __be32 default_qpn;
376 u16 reserved;
377 u8 hash_fn;
378 u8 flags;
379 __be32 rss_key[10];
380};
381
/* Packet counters exposed via ethtool. NUM_PKT_STATS must equal the number
 * of unsigned long counters here: 1 + 8 + 8 = 17. */
struct mlx4_en_pkt_stats {
	unsigned long broadcast;
	unsigned long rx_prio[8];	/* per-priority RX packets */
	unsigned long tx_prio[8];	/* per-priority TX packets */
#define NUM_PKT_STATS	17
};
388
/* Software-maintained per-port counters. NUM_PORT_STATS must equal the
 * number of counters here (currently 11). */
struct mlx4_en_port_stats {
	unsigned long lro_aggregated;	/* packets merged by LRO */
	unsigned long lro_flushed;	/* LRO sessions flushed */
	unsigned long lro_no_desc;	/* LRO descriptor shortage events */
	unsigned long tso_packets;
	unsigned long queue_stopped;	/* times a TX queue was stopped */
	unsigned long wake_queue;	/* times a TX queue was restarted */
	unsigned long tx_timeout;
	unsigned long rx_alloc_failed;
	unsigned long rx_chksum_good;	/* RX checksum validated by HW */
	unsigned long rx_chksum_none;	/* RX checksum left to the stack */
	unsigned long tx_chksum_offload;
#define NUM_PORT_STATS	11
};
403
/* Driver performance counters; NUM_PERF_COUNTERS must match the number of
 * fields here (6). */
struct mlx4_en_perf_stats {
	u32 tx_poll;		/* TX CQ poll invocations */
	u64 tx_pktsz_avg;	/* average TX packet size */
	u32 inflight_avg;
	u16 tx_coal_avg;	/* average TX coalescing */
	u16 rx_coal_avg;	/* average RX coalescing */
	u32 napi_quota;
#define NUM_PERF_COUNTERS	6
};
413
/* Layout of one RX fragment within a multi-fragment receive buffer;
 * computed by mlx4_en_calc_rx_buf(). */
struct mlx4_en_frag_info {
	u16 frag_size;		/* bytes in this fragment */
	u16 frag_prefix_size;	/* bytes in all preceding fragments */
	u16 frag_stride;	/* allocation stride for this fragment */
	u16 frag_align;		/* required alignment */
	u16 last_offset;

};
422
/* Per-netdev private state for one Ethernet port. Embeds all rings and
 * CQs; do not change field order without checking users of the layout. */
struct mlx4_en_priv {
	struct mlx4_en_dev *mdev;	/* owning adapter */
	struct mlx4_en_port_profile *prof; /* this port's profile */
	struct net_device *dev;
	struct vlan_group *vlgrp;	/* registered VLAN group, if any */
	struct net_device_stats stats;
	struct net_device_stats ret_stats; /* snapshot returned to the stack */
	spinlock_t stats_lock;

	/* Adaptive interrupt moderation state (sampled periodically). */
	unsigned long last_moder_packets;
	unsigned long last_moder_tx_packets;
	unsigned long last_moder_bytes;
	unsigned long last_moder_jiffies;
	int last_moder_time;
	u16 rx_usecs;			/* current RX coalescing time */
	u16 rx_frames;			/* current RX coalescing frame count */
	u16 tx_usecs;
	u16 tx_frames;
	u32 pkt_rate_low;		/* moderation range: low rate bound */
	u16 rx_usecs_low;
	u32 pkt_rate_high;		/* moderation range: high rate bound */
	u16 rx_usecs_high;
	u16 sample_interval;
	u16 adaptive_rx_coal;		/* nonzero: auto-moderation enabled */
	u32 msg_enable;			/* netif message-level bitmap */

	struct mlx4_hwq_resources res;
	int link_state;
	int last_link_state;
	bool port_up;
	int port;			/* 1-based port number */
	int registered;			/* netdev registered */
	int allocated;
	int stride;			/* RX descriptor stride */
	int rx_csum;			/* RX checksum offload enabled */
	u64 mac;			/* station MAC address */
	int mac_index;			/* index in the port MAC table */
	unsigned max_mtu;
	int base_qpn;

	struct mlx4_en_rss_map rss_map;
	u16 tx_prio_map[8];		/* priority -> TX ring mapping */
	u32 flags;
#define MLX4_EN_FLAG_PROMISC	0x1
	u32 tx_ring_num;
	u32 rx_ring_num;
	u32 rx_skb_size;
	struct mlx4_en_frag_info frag_info[MLX4_EN_MAX_RX_FRAGS];
	u16 num_frags;			/* RX fragments per packet */
	u16 log_rx_info;

	struct mlx4_en_tx_ring tx_ring[MAX_TX_RINGS];
	struct mlx4_en_rx_ring rx_ring[MAX_RX_RINGS];
	struct mlx4_en_cq tx_cq[MAX_TX_RINGS];
	struct mlx4_en_cq rx_cq[MAX_RX_RINGS];
	struct work_struct mcast_task;	/* multicast list update */
	struct work_struct mac_task;	/* MAC address update */
	struct delayed_work refill_task; /* RX buffer refill retry */
	struct work_struct watchdog_task;
	struct work_struct linkstate_task;
	struct delayed_work stats_task;	/* periodic statistics pull */
	struct mlx4_en_perf_stats pstats;
	struct mlx4_en_pkt_stats pkstats;
	struct mlx4_en_port_stats port_stats;
	struct dev_mc_list *mc_list;	/* cached multicast list */
	struct mlx4_en_stat_out_mbox hw_stats;
};
490
491
492void mlx4_en_destroy_netdev(struct net_device *dev);
493int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
494 struct mlx4_en_port_profile *prof);
495
496int mlx4_en_get_profile(struct mlx4_en_dev *mdev);
497
498int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
499 int entries, int ring, enum cq_type mode);
500void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
501int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
502void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
503int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
504int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
505
506void mlx4_en_poll_tx_cq(unsigned long data);
507void mlx4_en_tx_irq(struct mlx4_cq *mcq);
508int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
509
510int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring,
511 u32 size, u16 stride);
512void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring);
513int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
514 struct mlx4_en_tx_ring *ring,
515 int cq, int srqn);
516void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
517 struct mlx4_en_tx_ring *ring);
518
519int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
520 struct mlx4_en_rx_ring *ring,
521 u32 size, u16 stride);
522void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
523 struct mlx4_en_rx_ring *ring);
524int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv);
525void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
526 struct mlx4_en_rx_ring *ring);
527int mlx4_en_process_rx_cq(struct net_device *dev,
528 struct mlx4_en_cq *cq,
529 int budget);
530int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget);
531void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
532 int is_tx, int rss, int qpn, int cqn, int srqn,
533 struct mlx4_qp_context *context);
534int mlx4_en_map_buffer(struct mlx4_buf *buf);
535void mlx4_en_unmap_buffer(struct mlx4_buf *buf);
536
537void mlx4_en_calc_rx_buf(struct net_device *dev);
538void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv,
539 struct mlx4_en_rss_map *rss_map,
540 int num_entries, int num_rings);
541void mlx4_en_set_prio_map(struct mlx4_en_priv *priv, u16 *prio_map, u32 ring_num);
542int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv);
543void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv);
544int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring);
545void mlx4_en_rx_refill(struct work_struct *work);
546void mlx4_en_rx_irq(struct mlx4_cq *mcq);
547
548int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode);
549int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, u8 port, struct vlan_group *grp);
550int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
551 u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx);
552int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
553 u8 promisc);
554
555int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset);
556
557/*
558 * Globals
559 */
560extern const struct ethtool_ops mlx4_en_ethtool_ops;
561#endif
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c
index d1dd5b48dbd1..0caf74cae8bc 100644
--- a/drivers/net/mlx4/mr.c
+++ b/drivers/net/mlx4/mr.c
@@ -461,7 +461,7 @@ int mlx4_init_mr_table(struct mlx4_dev *dev)
461 int err; 461 int err;
462 462
463 err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts, 463 err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts,
464 ~0, dev->caps.reserved_mrws); 464 ~0, dev->caps.reserved_mrws, 0);
465 if (err) 465 if (err)
466 return err; 466 return err;
467 467
diff --git a/drivers/net/mlx4/pd.c b/drivers/net/mlx4/pd.c
index aa616892d09c..26d1a7a9e375 100644
--- a/drivers/net/mlx4/pd.c
+++ b/drivers/net/mlx4/pd.c
@@ -62,7 +62,7 @@ int mlx4_init_pd_table(struct mlx4_dev *dev)
62 struct mlx4_priv *priv = mlx4_priv(dev); 62 struct mlx4_priv *priv = mlx4_priv(dev);
63 63
64 return mlx4_bitmap_init(&priv->pd_bitmap, dev->caps.num_pds, 64 return mlx4_bitmap_init(&priv->pd_bitmap, dev->caps.num_pds,
65 (1 << 24) - 1, dev->caps.reserved_pds); 65 (1 << 24) - 1, dev->caps.reserved_pds, 0);
66} 66}
67 67
68void mlx4_cleanup_pd_table(struct mlx4_dev *dev) 68void mlx4_cleanup_pd_table(struct mlx4_dev *dev)
@@ -100,7 +100,7 @@ int mlx4_init_uar_table(struct mlx4_dev *dev)
100 100
101 return mlx4_bitmap_init(&mlx4_priv(dev)->uar_table.bitmap, 101 return mlx4_bitmap_init(&mlx4_priv(dev)->uar_table.bitmap,
102 dev->caps.num_uars, dev->caps.num_uars - 1, 102 dev->caps.num_uars, dev->caps.num_uars - 1,
103 max(128, dev->caps.reserved_uars)); 103 max(128, dev->caps.reserved_uars), 0);
104} 104}
105 105
106void mlx4_cleanup_uar_table(struct mlx4_dev *dev) 106void mlx4_cleanup_uar_table(struct mlx4_dev *dev)
diff --git a/drivers/net/mlx4/port.c b/drivers/net/mlx4/port.c
new file mode 100644
index 000000000000..e2fdab42c4ce
--- /dev/null
+++ b/drivers/net/mlx4/port.c
@@ -0,0 +1,282 @@
1/*
2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/errno.h>
34#include <linux/if_ether.h>
35
36#include <linux/mlx4/cmd.h>
37
38#include "mlx4.h"
39
40#define MLX4_MAC_VALID (1ull << 63)
41#define MLX4_MAC_MASK 0xffffffffffffULL
42
43#define MLX4_VLAN_VALID (1u << 31)
44#define MLX4_VLAN_MASK 0xfff
45
46void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table)
47{
48 int i;
49
50 mutex_init(&table->mutex);
51 for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
52 table->entries[i] = 0;
53 table->refs[i] = 0;
54 }
55 table->max = 1 << dev->caps.log_num_macs;
56 table->total = 0;
57}
58
59void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table)
60{
61 int i;
62
63 mutex_init(&table->mutex);
64 for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
65 table->entries[i] = 0;
66 table->refs[i] = 0;
67 }
68 table->max = 1 << dev->caps.log_num_vlans;
69 table->total = 0;
70}
71
72static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
73 __be64 *entries)
74{
75 struct mlx4_cmd_mailbox *mailbox;
76 u32 in_mod;
77 int err;
78
79 mailbox = mlx4_alloc_cmd_mailbox(dev);
80 if (IS_ERR(mailbox))
81 return PTR_ERR(mailbox);
82
83 memcpy(mailbox->buf, entries, MLX4_MAC_TABLE_SIZE);
84
85 in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port;
86 err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
87 MLX4_CMD_TIME_CLASS_B);
88
89 mlx4_free_cmd_mailbox(dev, mailbox);
90 return err;
91}
92
/*
 * Register @mac in port @port's MAC table (or take another reference if it
 * is already present) and return its table index through @index.
 * Returns 0 on success, -ENOSPC when the table is full, or the SET_PORT
 * command error on firmware failure (the local entry is rolled back).
 */
int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index)
{
	struct mlx4_mac_table *table = &mlx4_priv(dev)->port[port].mac_table;
	int i, err = 0;
	int free = -1;

	mlx4_dbg(dev, "Registering MAC: 0x%llx\n", (unsigned long long) mac);
	mutex_lock(&table->mutex);
	/* NOTE(review): the scan stops at MLX4_MAX_MAC_NUM - 1, so the last
	 * slot is never considered -- confirm this is intentional. */
	for (i = 0; i < MLX4_MAX_MAC_NUM - 1; i++) {
		if (free < 0 && !table->refs[i]) {
			free = i;	/* remember first unused slot */
			continue;
		}

		if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
			/* MAC already registered, increase reference count */
			*index = i;
			++table->refs[i];
			goto out;
		}
	}
	mlx4_dbg(dev, "Free MAC index is %d\n", free);

	if (table->total == table->max) {
		/* No free mac entries */
		err = -ENOSPC;
		goto out;
	}

	/* Register new MAC in the shadow table, then push it to the HW */
	table->refs[free] = 1;
	table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID);

	err = mlx4_set_port_mac_table(dev, port, table->entries);
	if (unlikely(err)) {
		mlx4_err(dev, "Failed adding MAC: 0x%llx\n", (unsigned long long) mac);
		/* roll back the shadow entry on firmware failure */
		table->refs[free] = 0;
		table->entries[free] = 0;
		goto out;
	}

	*index = free;
	++table->total;
out:
	mutex_unlock(&table->mutex);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_register_mac);
141
142void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int index)
143{
144 struct mlx4_mac_table *table = &mlx4_priv(dev)->port[port].mac_table;
145
146 mutex_lock(&table->mutex);
147 if (!table->refs[index]) {
148 mlx4_warn(dev, "No MAC entry for index %d\n", index);
149 goto out;
150 }
151 if (--table->refs[index]) {
152 mlx4_warn(dev, "Have more references for index %d,"
153 "no need to modify MAC table\n", index);
154 goto out;
155 }
156 table->entries[index] = 0;
157 mlx4_set_port_mac_table(dev, port, table->entries);
158 --table->total;
159out:
160 mutex_unlock(&table->mutex);
161}
162EXPORT_SYMBOL_GPL(mlx4_unregister_mac);
163
164static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
165 __be32 *entries)
166{
167 struct mlx4_cmd_mailbox *mailbox;
168 u32 in_mod;
169 int err;
170
171 mailbox = mlx4_alloc_cmd_mailbox(dev);
172 if (IS_ERR(mailbox))
173 return PTR_ERR(mailbox);
174
175 memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE);
176 in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port;
177 err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
178 MLX4_CMD_TIME_CLASS_B);
179
180 mlx4_free_cmd_mailbox(dev, mailbox);
181
182 return err;
183}
184
/*
 * Register @vlan in port @port's VLAN table (or take another reference if
 * it is already present) and return its index through @index. Indices
 * below MLX4_VLAN_REGULAR are reserved and never handed out here.
 * Returns 0, -ENOSPC when the table is full, or the SET_PORT error.
 */
int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
{
	struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
	int i, err = 0;
	int free = -1;

	mutex_lock(&table->mutex);
	for (i = MLX4_VLAN_REGULAR; i < MLX4_MAX_VLAN_NUM; i++) {
		if (free < 0 && (table->refs[i] == 0)) {
			free = i;	/* remember first unused slot */
			continue;
		}

		if (table->refs[i] &&
		    (vlan == (MLX4_VLAN_MASK &
			      be32_to_cpu(table->entries[i])))) {
			/* Vlan already registered, increase reference count */
			*index = i;
			++table->refs[i];
			goto out;
		}
	}

	if (table->total == table->max) {
		/* No free vlan entries */
		err = -ENOSPC;
		goto out;
	}

	/* Register new VLAN in the shadow table, then push it to the HW */
	table->refs[free] = 1;
	table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID);

	err = mlx4_set_port_vlan_table(dev, port, table->entries);
	if (unlikely(err)) {
		mlx4_warn(dev, "Failed adding vlan: %u\n", vlan);
		/* roll back the shadow entry on firmware failure */
		table->refs[free] = 0;
		table->entries[free] = 0;
		goto out;
	}

	*index = free;
	++table->total;
out:
	mutex_unlock(&table->mutex);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_register_vlan);
233
234void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
235{
236 struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
237
238 if (index < MLX4_VLAN_REGULAR) {
239 mlx4_warn(dev, "Trying to free special vlan index %d\n", index);
240 return;
241 }
242
243 mutex_lock(&table->mutex);
244 if (!table->refs[index]) {
245 mlx4_warn(dev, "No vlan entry for index %d\n", index);
246 goto out;
247 }
248 if (--table->refs[index]) {
249 mlx4_dbg(dev, "Have more references for index %d,"
250 "no need to modify vlan table\n", index);
251 goto out;
252 }
253 table->entries[index] = 0;
254 mlx4_set_port_vlan_table(dev, port, table->entries);
255 --table->total;
256out:
257 mutex_unlock(&table->mutex);
258}
259EXPORT_SYMBOL_GPL(mlx4_unregister_vlan);
260
261int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port)
262{
263 struct mlx4_cmd_mailbox *mailbox;
264 int err;
265 u8 is_eth = dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
266
267 mailbox = mlx4_alloc_cmd_mailbox(dev);
268 if (IS_ERR(mailbox))
269 return PTR_ERR(mailbox);
270
271 memset(mailbox->buf, 0, 256);
272 if (is_eth) {
273 ((u8 *) mailbox->buf)[3] = 6;
274 ((__be16 *) mailbox->buf)[4] = cpu_to_be16(1 << 15);
275 ((__be16 *) mailbox->buf)[6] = cpu_to_be16(1 << 15);
276 }
277 err = mlx4_cmd(dev, mailbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
278 MLX4_CMD_TIME_CLASS_B);
279
280 mlx4_free_cmd_mailbox(dev, mailbox);
281 return err;
282}
diff --git a/drivers/net/mlx4/qp.c b/drivers/net/mlx4/qp.c
index c49a86044bf7..1c565ef8d179 100644
--- a/drivers/net/mlx4/qp.c
+++ b/drivers/net/mlx4/qp.c
@@ -147,19 +147,42 @@ int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
147} 147}
148EXPORT_SYMBOL_GPL(mlx4_qp_modify); 148EXPORT_SYMBOL_GPL(mlx4_qp_modify);
149 149
150int mlx4_qp_alloc(struct mlx4_dev *dev, int sqpn, struct mlx4_qp *qp) 150int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base)
151{
152 struct mlx4_priv *priv = mlx4_priv(dev);
153 struct mlx4_qp_table *qp_table = &priv->qp_table;
154 int qpn;
155
156 qpn = mlx4_bitmap_alloc_range(&qp_table->bitmap, cnt, align);
157 if (qpn == -1)
158 return -ENOMEM;
159
160 *base = qpn;
161 return 0;
162}
163EXPORT_SYMBOL_GPL(mlx4_qp_reserve_range);
164
165void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
166{
167 struct mlx4_priv *priv = mlx4_priv(dev);
168 struct mlx4_qp_table *qp_table = &priv->qp_table;
169 if (base_qpn < dev->caps.sqp_start + 8)
170 return;
171
172 mlx4_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt);
173}
174EXPORT_SYMBOL_GPL(mlx4_qp_release_range);
175
176int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp)
151{ 177{
152 struct mlx4_priv *priv = mlx4_priv(dev); 178 struct mlx4_priv *priv = mlx4_priv(dev);
153 struct mlx4_qp_table *qp_table = &priv->qp_table; 179 struct mlx4_qp_table *qp_table = &priv->qp_table;
154 int err; 180 int err;
155 181
156 if (sqpn) 182 if (!qpn)
157 qp->qpn = sqpn; 183 return -EINVAL;
158 else { 184
159 qp->qpn = mlx4_bitmap_alloc(&qp_table->bitmap); 185 qp->qpn = qpn;
160 if (qp->qpn == -1)
161 return -ENOMEM;
162 }
163 186
164 err = mlx4_table_get(dev, &qp_table->qp_table, qp->qpn); 187 err = mlx4_table_get(dev, &qp_table->qp_table, qp->qpn);
165 if (err) 188 if (err)
@@ -208,9 +231,6 @@ err_put_qp:
208 mlx4_table_put(dev, &qp_table->qp_table, qp->qpn); 231 mlx4_table_put(dev, &qp_table->qp_table, qp->qpn);
209 232
210err_out: 233err_out:
211 if (!sqpn)
212 mlx4_bitmap_free(&qp_table->bitmap, qp->qpn);
213
214 return err; 234 return err;
215} 235}
216EXPORT_SYMBOL_GPL(mlx4_qp_alloc); 236EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
@@ -239,9 +259,6 @@ void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp)
239 mlx4_table_put(dev, &qp_table->altc_table, qp->qpn); 259 mlx4_table_put(dev, &qp_table->altc_table, qp->qpn);
240 mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn); 260 mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn);
241 mlx4_table_put(dev, &qp_table->qp_table, qp->qpn); 261 mlx4_table_put(dev, &qp_table->qp_table, qp->qpn);
242
243 if (qp->qpn >= dev->caps.sqp_start + 8)
244 mlx4_bitmap_free(&qp_table->bitmap, qp->qpn);
245} 262}
246EXPORT_SYMBOL_GPL(mlx4_qp_free); 263EXPORT_SYMBOL_GPL(mlx4_qp_free);
247 264
@@ -255,6 +272,7 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
255{ 272{
256 struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; 273 struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
257 int err; 274 int err;
275 int reserved_from_top = 0;
258 276
259 spin_lock_init(&qp_table->lock); 277 spin_lock_init(&qp_table->lock);
260 INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC); 278 INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC);
@@ -264,9 +282,40 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
264 * block of special QPs must be aligned to a multiple of 8, so 282 * block of special QPs must be aligned to a multiple of 8, so
265 * round up. 283 * round up.
266 */ 284 */
267 dev->caps.sqp_start = ALIGN(dev->caps.reserved_qps, 8); 285 dev->caps.sqp_start =
286 ALIGN(dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 8);
287
288 {
289 int sort[MLX4_NUM_QP_REGION];
290 int i, j, tmp;
291 int last_base = dev->caps.num_qps;
292
293 for (i = 1; i < MLX4_NUM_QP_REGION; ++i)
294 sort[i] = i;
295
296 for (i = MLX4_NUM_QP_REGION; i > 0; --i) {
297 for (j = 2; j < i; ++j) {
298 if (dev->caps.reserved_qps_cnt[sort[j]] >
299 dev->caps.reserved_qps_cnt[sort[j - 1]]) {
300 tmp = sort[j];
301 sort[j] = sort[j - 1];
302 sort[j - 1] = tmp;
303 }
304 }
305 }
306
307 for (i = 1; i < MLX4_NUM_QP_REGION; ++i) {
308 last_base -= dev->caps.reserved_qps_cnt[sort[i]];
309 dev->caps.reserved_qps_base[sort[i]] = last_base;
310 reserved_from_top +=
311 dev->caps.reserved_qps_cnt[sort[i]];
312 }
313
314 }
315
268 err = mlx4_bitmap_init(&qp_table->bitmap, dev->caps.num_qps, 316 err = mlx4_bitmap_init(&qp_table->bitmap, dev->caps.num_qps,
269 (1 << 24) - 1, dev->caps.sqp_start + 8); 317 (1 << 23) - 1, dev->caps.sqp_start + 8,
318 reserved_from_top);
270 if (err) 319 if (err)
271 return err; 320 return err;
272 321
diff --git a/drivers/net/mlx4/srq.c b/drivers/net/mlx4/srq.c
index 533eb6db24b3..fe9f218691f5 100644
--- a/drivers/net/mlx4/srq.c
+++ b/drivers/net/mlx4/srq.c
@@ -245,7 +245,7 @@ int mlx4_init_srq_table(struct mlx4_dev *dev)
245 INIT_RADIX_TREE(&srq_table->tree, GFP_ATOMIC); 245 INIT_RADIX_TREE(&srq_table->tree, GFP_ATOMIC);
246 246
247 err = mlx4_bitmap_init(&srq_table->bitmap, dev->caps.num_srqs, 247 err = mlx4_bitmap_init(&srq_table->bitmap, dev->caps.num_srqs,
248 dev->caps.num_srqs - 1, dev->caps.reserved_srqs); 248 dev->caps.num_srqs - 1, dev->caps.reserved_srqs, 0);
249 if (err) 249 if (err)
250 return err; 250 return err;
251 251
diff --git a/drivers/net/xtsonic.c b/drivers/net/xtsonic.c
new file mode 100644
index 000000000000..da42aa06a3ba
--- /dev/null
+++ b/drivers/net/xtsonic.c
@@ -0,0 +1,319 @@
1/*
2 * xtsonic.c
3 *
4 * (C) 2001 - 2007 Tensilica Inc.
5 * Kevin Chea <kchea@yahoo.com>
6 * Marc Gauthier <marc@linux-xtensa.org>
7 * Chris Zankel <chris@zankel.net>
8 *
9 * (C) 1996,1998 by Thomas Bogendoerfer (tsbogend@alpha.franken.de)
10 *
11 * This driver is based on work from Andreas Busse, but most of
12 * the code is rewritten.
13 *
14 * (C) 1995 by Andreas Busse (andy@waldorf-gmbh.de)
15 *
16 * A driver for the onboard Sonic ethernet controller on the XT2000.
17 */
18
19#include <linux/kernel.h>
20#include <linux/module.h>
21#include <linux/types.h>
22#include <linux/fcntl.h>
23#include <linux/interrupt.h>
24#include <linux/init.h>
25#include <linux/ioport.h>
26#include <linux/in.h>
27#include <linux/slab.h>
28#include <linux/string.h>
29#include <linux/delay.h>
30#include <linux/errno.h>
31#include <linux/netdevice.h>
32#include <linux/etherdevice.h>
33#include <linux/skbuff.h>
34#include <linux/platform_device.h>
35#include <linux/dma-mapping.h>
36
37#include <asm/io.h>
38#include <asm/pgtable.h>
39#include <asm/dma.h>
40
41static char xtsonic_string[] = "xtsonic";
42
43extern unsigned xtboard_nvram_valid(void);
44extern void xtboard_get_ether_addr(unsigned char *buf);
45
46#include "sonic.h"
47
48/*
49 * According to the documentation for the Sonic ethernet controller,
50 * EOBC should be 760 words (1520 bytes) for 32-bit applications, and,
51 * as such, 2 words less than the buffer size. The value for RBSIZE
52 * defined in sonic.h, however is only 1520.
53 *
54 * (Note that in 16-bit configurations, EOBC is 759 words (1518 bytes) and
55 * RBSIZE 1520 bytes)
56 */
57#undef SONIC_RBSIZE
58#define SONIC_RBSIZE 1524
59
60/*
61 * The chip provides 256 byte register space.
62 */
63#define SONIC_MEM_SIZE 0x100
64
65/*
66 * Macros to access SONIC registers
67 */
68#define SONIC_READ(reg) \
69 (0xffff & *((volatile unsigned int *)dev->base_addr+reg))
70
71#define SONIC_WRITE(reg,val) \
72 *((volatile unsigned int *)dev->base_addr+reg) = val
73
74
75/* Use 0 for production, 1 for verification, and >2 for debug */
76#ifdef SONIC_DEBUG
77static unsigned int sonic_debug = SONIC_DEBUG;
78#else
79static unsigned int sonic_debug = 1;
80#endif
81
82/*
83 * We cannot use station (ethernet) address prefixes to detect the
84 * sonic controller since these are board manufacturer depended.
85 * So we check for known Silicon Revision IDs instead.
86 */
87static unsigned short known_revisions[] =
88{
89 0x101, /* SONIC 83934 */
90 0xffff /* end of list */
91};
92
93static int xtsonic_open(struct net_device *dev)
94{
95 if (request_irq(dev->irq,&sonic_interrupt,IRQF_DISABLED,"sonic",dev)) {
96 printk(KERN_ERR "%s: unable to get IRQ %d.\n",
97 dev->name, dev->irq);
98 return -EAGAIN;
99 }
100 return sonic_open(dev);
101}
102
103static int xtsonic_close(struct net_device *dev)
104{
105 int err;
106 err = sonic_close(dev);
107 free_irq(dev->irq, dev);
108 return err;
109}
110
111static int __init sonic_probe1(struct net_device *dev)
112{
113 static unsigned version_printed = 0;
114 unsigned int silicon_revision;
115 struct sonic_local *lp = netdev_priv(dev);
116 unsigned int base_addr = dev->base_addr;
117 int i;
118 int err = 0;
119
120 if (!request_mem_region(base_addr, 0x100, xtsonic_string))
121 return -EBUSY;
122
123 /*
124 * get the Silicon Revision ID. If this is one of the known
125 * one assume that we found a SONIC ethernet controller at
126 * the expected location.
127 */
128 silicon_revision = SONIC_READ(SONIC_SR);
129 if (sonic_debug > 1)
130 printk("SONIC Silicon Revision = 0x%04x\n",silicon_revision);
131
132 i = 0;
133 while ((known_revisions[i] != 0xffff) &&
134 (known_revisions[i] != silicon_revision))
135 i++;
136
137 if (known_revisions[i] == 0xffff) {
138 printk("SONIC ethernet controller not found (0x%4x)\n",
139 silicon_revision);
140 return -ENODEV;
141 }
142
143 if (sonic_debug && version_printed++ == 0)
144 printk(version);
145
146 /*
147 * Put the sonic into software reset, then retrieve ethernet address.
148 * Note: we are assuming that the boot-loader has initialized the cam.
149 */
150 SONIC_WRITE(SONIC_CMD,SONIC_CR_RST);
151 SONIC_WRITE(SONIC_DCR,
152 SONIC_DCR_WC0|SONIC_DCR_DW|SONIC_DCR_LBR|SONIC_DCR_SBUS);
153 SONIC_WRITE(SONIC_CEP,0);
154 SONIC_WRITE(SONIC_IMR,0);
155
156 SONIC_WRITE(SONIC_CMD,SONIC_CR_RST);
157 SONIC_WRITE(SONIC_CEP,0);
158
159 for (i=0; i<3; i++) {
160 unsigned int val = SONIC_READ(SONIC_CAP0-i);
161 dev->dev_addr[i*2] = val;
162 dev->dev_addr[i*2+1] = val >> 8;
163 }
164
165 /* Initialize the device structure. */
166
167 lp->dma_bitmode = SONIC_BITMODE32;
168
169 /*
170 * Allocate local private descriptor areas in uncached space.
171 * The entire structure must be located within the same 64kb segment.
172 * A simple way to ensure this is to allocate twice the
173 * size of the structure -- given that the structure is
174 * much less than 64 kB, at least one of the halves of
175 * the allocated area will be contained entirely in 64 kB.
176 * We also allocate extra space for a pointer to allow freeing
177 * this structure later on (in xtsonic_cleanup_module()).
178 */
179 lp->descriptors =
180 dma_alloc_coherent(lp->device,
181 SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
182 &lp->descriptors_laddr, GFP_KERNEL);
183
184 if (lp->descriptors == NULL) {
185 printk(KERN_ERR "%s: couldn't alloc DMA memory for "
186 " descriptors.\n", lp->device->bus_id);
187 goto out;
188 }
189
190 lp->cda = lp->descriptors;
191 lp->tda = lp->cda + (SIZEOF_SONIC_CDA
192 * SONIC_BUS_SCALE(lp->dma_bitmode));
193 lp->rda = lp->tda + (SIZEOF_SONIC_TD * SONIC_NUM_TDS
194 * SONIC_BUS_SCALE(lp->dma_bitmode));
195 lp->rra = lp->rda + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
196 * SONIC_BUS_SCALE(lp->dma_bitmode));
197
198 /* get the virtual dma address */
199
200 lp->cda_laddr = lp->descriptors_laddr;
201 lp->tda_laddr = lp->cda_laddr + (SIZEOF_SONIC_CDA
202 * SONIC_BUS_SCALE(lp->dma_bitmode));
203 lp->rda_laddr = lp->tda_laddr + (SIZEOF_SONIC_TD * SONIC_NUM_TDS
204 * SONIC_BUS_SCALE(lp->dma_bitmode));
205 lp->rra_laddr = lp->rda_laddr + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
206 * SONIC_BUS_SCALE(lp->dma_bitmode));
207
208 dev->open = xtsonic_open;
209 dev->stop = xtsonic_close;
210 dev->hard_start_xmit = sonic_send_packet;
211 dev->get_stats = sonic_get_stats;
212 dev->set_multicast_list = &sonic_multicast_list;
213 dev->tx_timeout = sonic_tx_timeout;
214 dev->watchdog_timeo = TX_TIMEOUT;
215
216 /*
217 * clear tally counter
218 */
219 SONIC_WRITE(SONIC_CRCT,0xffff);
220 SONIC_WRITE(SONIC_FAET,0xffff);
221 SONIC_WRITE(SONIC_MPT,0xffff);
222
223 return 0;
224out:
225 release_region(dev->base_addr, SONIC_MEM_SIZE);
226 return err;
227}
228
229
230/*
231 * Probe for a SONIC ethernet controller on an XT2000 board.
232 * Actually probing is superfluous but we're paranoid.
233 */
234
235int __init xtsonic_probe(struct platform_device *pdev)
236{
237 struct net_device *dev;
238 struct sonic_local *lp;
239 struct resource *resmem, *resirq;
240 int err = 0;
241
242 DECLARE_MAC_BUF(mac);
243
244 if ((resmem = platform_get_resource(pdev, IORESOURCE_MEM, 0)) == NULL)
245 return -ENODEV;
246
247 if ((resirq = platform_get_resource(pdev, IORESOURCE_IRQ, 0)) == NULL)
248 return -ENODEV;
249
250 if ((dev = alloc_etherdev(sizeof(struct sonic_local))) == NULL)
251 return -ENOMEM;
252
253 lp = netdev_priv(dev);
254 lp->device = &pdev->dev;
255 SET_NETDEV_DEV(dev, &pdev->dev);
256 netdev_boot_setup_check(dev);
257
258 dev->base_addr = resmem->start;
259 dev->irq = resirq->start;
260
261 if ((err = sonic_probe1(dev)))
262 goto out;
263 if ((err = register_netdev(dev)))
264 goto out1;
265
266 printk("%s: SONIC ethernet @%08lx, MAC %s, IRQ %d\n", dev->name,
267 dev->base_addr, print_mac(mac, dev->dev_addr), dev->irq);
268
269 return 0;
270
271out1:
272 release_region(dev->base_addr, SONIC_MEM_SIZE);
273out:
274 free_netdev(dev);
275
276 return err;
277}
278
279MODULE_DESCRIPTION("Xtensa XT2000 SONIC ethernet driver");
280module_param(sonic_debug, int, 0);
281MODULE_PARM_DESC(sonic_debug, "xtsonic debug level (1-4)");
282
283#include "sonic.c"
284
/*
 * Platform remove: unregister from the network stack, free the coherent
 * descriptor area allocated in sonic_probe1(), release the register
 * region and free the net_device.
 */
static int __devexit xtsonic_device_remove (struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct sonic_local *lp = netdev_priv(dev);

	unregister_netdev(dev);
	/* size must match the dma_alloc_coherent() in sonic_probe1() */
	dma_free_coherent(lp->device,
			  SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
			  lp->descriptors, lp->descriptors_laddr);
	release_region (dev->base_addr, SONIC_MEM_SIZE);
	free_netdev(dev);

	return 0;
}
299
/* Platform driver glue; the matching platform device is created by the
 * XT2000 board support code. */
static struct platform_driver xtsonic_driver = {
	.probe = xtsonic_probe,
	.remove = __devexit_p(xtsonic_device_remove),
	.driver = {
		.name = xtsonic_string,	/* "xtsonic" */
	},
};
307
/* Module init: register the platform driver. */
static int __init xtsonic_init(void)
{
	return platform_driver_register(&xtsonic_driver);
}
312
/* Module exit: unregister the platform driver. */
static void __exit xtsonic_cleanup(void)
{
	platform_driver_unregister(&xtsonic_driver);
}
317
318module_init(xtsonic_init);
319module_exit(xtsonic_cleanup);
diff --git a/drivers/of/of_i2c.c b/drivers/of/of_i2c.c
index 6a98dc8aa30b..24bbef777c19 100644
--- a/drivers/of/of_i2c.c
+++ b/drivers/of/of_i2c.c
@@ -41,7 +41,7 @@ void of_register_i2c_devices(struct i2c_adapter *adap,
41 41
42 info.addr = *addr; 42 info.addr = *addr;
43 43
44 request_module(info.type); 44 request_module("%s", info.type);
45 45
46 result = i2c_new_device(adap, &info); 46 result = i2c_new_device(adap, &info);
47 if (result == NULL) { 47 if (result == NULL) {
diff --git a/drivers/of/of_spi.c b/drivers/of/of_spi.c
index b01eec026f68..bed0ed6dcdc1 100644
--- a/drivers/of/of_spi.c
+++ b/drivers/of/of_spi.c
@@ -61,6 +61,8 @@ void of_register_spi_devices(struct spi_master *master, struct device_node *np)
61 spi->mode |= SPI_CPHA; 61 spi->mode |= SPI_CPHA;
62 if (of_find_property(nc, "spi-cpol", NULL)) 62 if (of_find_property(nc, "spi-cpol", NULL))
63 spi->mode |= SPI_CPOL; 63 spi->mode |= SPI_CPOL;
64 if (of_find_property(nc, "spi-cs-high", NULL))
65 spi->mode |= SPI_CS_HIGH;
64 66
65 /* Device speed */ 67 /* Device speed */
66 prop = of_get_property(nc, "spi-max-frequency", &len); 68 prop = of_get_property(nc, "spi-max-frequency", &len);
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
index ed982273fb8b..b55cd23ffdef 100644
--- a/drivers/oprofile/buffer_sync.c
+++ b/drivers/oprofile/buffer_sync.c
@@ -41,7 +41,6 @@ static cpumask_t marked_cpus = CPU_MASK_NONE;
41static DEFINE_SPINLOCK(task_mortuary); 41static DEFINE_SPINLOCK(task_mortuary);
42static void process_task_mortuary(void); 42static void process_task_mortuary(void);
43 43
44
45/* Take ownership of the task struct and place it on the 44/* Take ownership of the task struct and place it on the
46 * list for processing. Only after two full buffer syncs 45 * list for processing. Only after two full buffer syncs
47 * does the task eventually get freed, because by then 46 * does the task eventually get freed, because by then
@@ -341,7 +340,7 @@ static void add_trace_begin(void)
341 * Add IBS fetch and op entries to event buffer 340 * Add IBS fetch and op entries to event buffer
342 */ 341 */
343static void add_ibs_begin(struct oprofile_cpu_buffer *cpu_buf, int code, 342static void add_ibs_begin(struct oprofile_cpu_buffer *cpu_buf, int code,
344 int in_kernel, struct mm_struct *mm) 343 struct mm_struct *mm)
345{ 344{
346 unsigned long rip; 345 unsigned long rip;
347 int i, count; 346 int i, count;
@@ -565,9 +564,11 @@ void sync_buffer(int cpu)
565 struct task_struct *new; 564 struct task_struct *new;
566 unsigned long cookie = 0; 565 unsigned long cookie = 0;
567 int in_kernel = 1; 566 int in_kernel = 1;
568 unsigned int i;
569 sync_buffer_state state = sb_buffer_start; 567 sync_buffer_state state = sb_buffer_start;
568#ifndef CONFIG_OPROFILE_IBS
569 unsigned int i;
570 unsigned long available; 570 unsigned long available;
571#endif
571 572
572 mutex_lock(&buffer_mutex); 573 mutex_lock(&buffer_mutex);
573 574
@@ -575,9 +576,13 @@ void sync_buffer(int cpu)
575 576
576 /* Remember, only we can modify tail_pos */ 577 /* Remember, only we can modify tail_pos */
577 578
579#ifndef CONFIG_OPROFILE_IBS
578 available = get_slots(cpu_buf); 580 available = get_slots(cpu_buf);
579 581
580 for (i = 0; i < available; ++i) { 582 for (i = 0; i < available; ++i) {
583#else
584 while (get_slots(cpu_buf)) {
585#endif
581 struct op_sample *s = &cpu_buf->buffer[cpu_buf->tail_pos]; 586 struct op_sample *s = &cpu_buf->buffer[cpu_buf->tail_pos];
582 587
583 if (is_code(s->eip)) { 588 if (is_code(s->eip)) {
@@ -593,12 +598,10 @@ void sync_buffer(int cpu)
593#ifdef CONFIG_OPROFILE_IBS 598#ifdef CONFIG_OPROFILE_IBS
594 } else if (s->event == IBS_FETCH_BEGIN) { 599 } else if (s->event == IBS_FETCH_BEGIN) {
595 state = sb_bt_start; 600 state = sb_bt_start;
596 add_ibs_begin(cpu_buf, 601 add_ibs_begin(cpu_buf, IBS_FETCH_CODE, mm);
597 IBS_FETCH_CODE, in_kernel, mm);
598 } else if (s->event == IBS_OP_BEGIN) { 602 } else if (s->event == IBS_OP_BEGIN) {
599 state = sb_bt_start; 603 state = sb_bt_start;
600 add_ibs_begin(cpu_buf, 604 add_ibs_begin(cpu_buf, IBS_OP_CODE, mm);
601 IBS_OP_CODE, in_kernel, mm);
602#endif 605#endif
603 } else { 606 } else {
604 struct mm_struct *oldmm = mm; 607 struct mm_struct *oldmm = mm;
@@ -628,3 +631,27 @@ void sync_buffer(int cpu)
628 631
629 mutex_unlock(&buffer_mutex); 632 mutex_unlock(&buffer_mutex);
630} 633}
634
635/* The function can be used to add a buffer worth of data directly to
636 * the kernel buffer. The buffer is assumed to be a circular buffer.
637 * Take the entries from index start and end at index end, wrapping
638 * at max_entries.
639 */
640void oprofile_put_buff(unsigned long *buf, unsigned int start,
641 unsigned int stop, unsigned int max)
642{
643 int i;
644
645 i = start;
646
647 mutex_lock(&buffer_mutex);
648 while (i != stop) {
649 add_event_entry(buf[i++]);
650
651 if (i >= max)
652 i = 0;
653 }
654
655 mutex_unlock(&buffer_mutex);
656}
657
diff --git a/drivers/oprofile/buffer_sync.h b/drivers/oprofile/buffer_sync.h
index 08866f6a96a3..3110732c1835 100644
--- a/drivers/oprofile/buffer_sync.h
+++ b/drivers/oprofile/buffer_sync.h
@@ -9,13 +9,13 @@
9 9
10#ifndef OPROFILE_BUFFER_SYNC_H 10#ifndef OPROFILE_BUFFER_SYNC_H
11#define OPROFILE_BUFFER_SYNC_H 11#define OPROFILE_BUFFER_SYNC_H
12 12
13/* add the necessary profiling hooks */ 13/* add the necessary profiling hooks */
14int sync_start(void); 14int sync_start(void);
15 15
16/* remove the hooks */ 16/* remove the hooks */
17void sync_stop(void); 17void sync_stop(void);
18 18
19/* sync the given CPU's buffer */ 19/* sync the given CPU's buffer */
20void sync_buffer(int cpu); 20void sync_buffer(int cpu);
21 21
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index e1bd5a937f6c..01d38e78cde1 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -22,7 +22,7 @@
22#include <linux/oprofile.h> 22#include <linux/oprofile.h>
23#include <linux/vmalloc.h> 23#include <linux/vmalloc.h>
24#include <linux/errno.h> 24#include <linux/errno.h>
25 25
26#include "event_buffer.h" 26#include "event_buffer.h"
27#include "cpu_buffer.h" 27#include "cpu_buffer.h"
28#include "buffer_sync.h" 28#include "buffer_sync.h"
@@ -38,27 +38,40 @@ static int work_enabled;
38void free_cpu_buffers(void) 38void free_cpu_buffers(void)
39{ 39{
40 int i; 40 int i;
41 41
42 for_each_online_cpu(i) { 42 for_each_possible_cpu(i) {
43 vfree(per_cpu(cpu_buffer, i).buffer); 43 vfree(per_cpu(cpu_buffer, i).buffer);
44 per_cpu(cpu_buffer, i).buffer = NULL; 44 per_cpu(cpu_buffer, i).buffer = NULL;
45 } 45 }
46} 46}
47 47
48unsigned long oprofile_get_cpu_buffer_size(void)
49{
50 return fs_cpu_buffer_size;
51}
52
53void oprofile_cpu_buffer_inc_smpl_lost(void)
54{
55 struct oprofile_cpu_buffer *cpu_buf
56 = &__get_cpu_var(cpu_buffer);
57
58 cpu_buf->sample_lost_overflow++;
59}
60
48int alloc_cpu_buffers(void) 61int alloc_cpu_buffers(void)
49{ 62{
50 int i; 63 int i;
51 64
52 unsigned long buffer_size = fs_cpu_buffer_size; 65 unsigned long buffer_size = fs_cpu_buffer_size;
53 66
54 for_each_online_cpu(i) { 67 for_each_possible_cpu(i) {
55 struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i); 68 struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);
56 69
57 b->buffer = vmalloc_node(sizeof(struct op_sample) * buffer_size, 70 b->buffer = vmalloc_node(sizeof(struct op_sample) * buffer_size,
58 cpu_to_node(i)); 71 cpu_to_node(i));
59 if (!b->buffer) 72 if (!b->buffer)
60 goto fail; 73 goto fail;
61 74
62 b->last_task = NULL; 75 b->last_task = NULL;
63 b->last_is_kernel = -1; 76 b->last_is_kernel = -1;
64 b->tracing = 0; 77 b->tracing = 0;
@@ -112,7 +125,7 @@ void end_cpu_work(void)
112} 125}
113 126
114/* Resets the cpu buffer to a sane state. */ 127/* Resets the cpu buffer to a sane state. */
115void cpu_buffer_reset(struct oprofile_cpu_buffer * cpu_buf) 128void cpu_buffer_reset(struct oprofile_cpu_buffer *cpu_buf)
116{ 129{
117 /* reset these to invalid values; the next sample 130 /* reset these to invalid values; the next sample
118 * collected will populate the buffer with proper 131 * collected will populate the buffer with proper
@@ -123,7 +136,7 @@ void cpu_buffer_reset(struct oprofile_cpu_buffer * cpu_buf)
123} 136}
124 137
125/* compute number of available slots in cpu_buffer queue */ 138/* compute number of available slots in cpu_buffer queue */
126static unsigned long nr_available_slots(struct oprofile_cpu_buffer const * b) 139static unsigned long nr_available_slots(struct oprofile_cpu_buffer const *b)
127{ 140{
128 unsigned long head = b->head_pos; 141 unsigned long head = b->head_pos;
129 unsigned long tail = b->tail_pos; 142 unsigned long tail = b->tail_pos;
@@ -134,7 +147,7 @@ static unsigned long nr_available_slots(struct oprofile_cpu_buffer const * b)
134 return tail + (b->buffer_size - head) - 1; 147 return tail + (b->buffer_size - head) - 1;
135} 148}
136 149
137static void increment_head(struct oprofile_cpu_buffer * b) 150static void increment_head(struct oprofile_cpu_buffer *b)
138{ 151{
139 unsigned long new_head = b->head_pos + 1; 152 unsigned long new_head = b->head_pos + 1;
140 153
@@ -149,17 +162,17 @@ static void increment_head(struct oprofile_cpu_buffer * b)
149} 162}
150 163
151static inline void 164static inline void
152add_sample(struct oprofile_cpu_buffer * cpu_buf, 165add_sample(struct oprofile_cpu_buffer *cpu_buf,
153 unsigned long pc, unsigned long event) 166 unsigned long pc, unsigned long event)
154{ 167{
155 struct op_sample * entry = &cpu_buf->buffer[cpu_buf->head_pos]; 168 struct op_sample *entry = &cpu_buf->buffer[cpu_buf->head_pos];
156 entry->eip = pc; 169 entry->eip = pc;
157 entry->event = event; 170 entry->event = event;
158 increment_head(cpu_buf); 171 increment_head(cpu_buf);
159} 172}
160 173
161static inline void 174static inline void
162add_code(struct oprofile_cpu_buffer * buffer, unsigned long value) 175add_code(struct oprofile_cpu_buffer *buffer, unsigned long value)
163{ 176{
164 add_sample(buffer, ESCAPE_CODE, value); 177 add_sample(buffer, ESCAPE_CODE, value);
165} 178}
@@ -173,10 +186,10 @@ add_code(struct oprofile_cpu_buffer * buffer, unsigned long value)
173 * pc. We tag this in the buffer by generating kernel enter/exit 186 * pc. We tag this in the buffer by generating kernel enter/exit
174 * events whenever is_kernel changes 187 * events whenever is_kernel changes
175 */ 188 */
176static int log_sample(struct oprofile_cpu_buffer * cpu_buf, unsigned long pc, 189static int log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
177 int is_kernel, unsigned long event) 190 int is_kernel, unsigned long event)
178{ 191{
179 struct task_struct * task; 192 struct task_struct *task;
180 193
181 cpu_buf->sample_received++; 194 cpu_buf->sample_received++;
182 195
@@ -205,7 +218,7 @@ static int log_sample(struct oprofile_cpu_buffer * cpu_buf, unsigned long pc,
205 cpu_buf->last_task = task; 218 cpu_buf->last_task = task;
206 add_code(cpu_buf, (unsigned long)task); 219 add_code(cpu_buf, (unsigned long)task);
207 } 220 }
208 221
209 add_sample(cpu_buf, pc, event); 222 add_sample(cpu_buf, pc, event);
210 return 1; 223 return 1;
211} 224}
@@ -222,7 +235,7 @@ static int oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf)
222 return 1; 235 return 1;
223} 236}
224 237
225static void oprofile_end_trace(struct oprofile_cpu_buffer * cpu_buf) 238static void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf)
226{ 239{
227 cpu_buf->tracing = 0; 240 cpu_buf->tracing = 0;
228} 241}
@@ -257,21 +270,23 @@ void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
257 270
258#ifdef CONFIG_OPROFILE_IBS 271#ifdef CONFIG_OPROFILE_IBS
259 272
260#define MAX_IBS_SAMPLE_SIZE 14 273#define MAX_IBS_SAMPLE_SIZE 14
261static int log_ibs_sample(struct oprofile_cpu_buffer *cpu_buf, 274
262 unsigned long pc, int is_kernel, unsigned int *ibs, int ibs_code) 275void oprofile_add_ibs_sample(struct pt_regs *const regs,
276 unsigned int *const ibs_sample, int ibs_code)
263{ 277{
278 int is_kernel = !user_mode(regs);
279 struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
264 struct task_struct *task; 280 struct task_struct *task;
265 281
266 cpu_buf->sample_received++; 282 cpu_buf->sample_received++;
267 283
268 if (nr_available_slots(cpu_buf) < MAX_IBS_SAMPLE_SIZE) { 284 if (nr_available_slots(cpu_buf) < MAX_IBS_SAMPLE_SIZE) {
285 /* we can't backtrace since we lost the source of this event */
269 cpu_buf->sample_lost_overflow++; 286 cpu_buf->sample_lost_overflow++;
270 return 0; 287 return;
271 } 288 }
272 289
273 is_kernel = !!is_kernel;
274
275 /* notice a switch from user->kernel or vice versa */ 290 /* notice a switch from user->kernel or vice versa */
276 if (cpu_buf->last_is_kernel != is_kernel) { 291 if (cpu_buf->last_is_kernel != is_kernel) {
277 cpu_buf->last_is_kernel = is_kernel; 292 cpu_buf->last_is_kernel = is_kernel;
@@ -281,7 +296,6 @@ static int log_ibs_sample(struct oprofile_cpu_buffer *cpu_buf,
281 /* notice a task switch */ 296 /* notice a task switch */
282 if (!is_kernel) { 297 if (!is_kernel) {
283 task = current; 298 task = current;
284
285 if (cpu_buf->last_task != task) { 299 if (cpu_buf->last_task != task) {
286 cpu_buf->last_task = task; 300 cpu_buf->last_task = task;
287 add_code(cpu_buf, (unsigned long)task); 301 add_code(cpu_buf, (unsigned long)task);
@@ -289,36 +303,17 @@ static int log_ibs_sample(struct oprofile_cpu_buffer *cpu_buf,
289 } 303 }
290 304
291 add_code(cpu_buf, ibs_code); 305 add_code(cpu_buf, ibs_code);
292 add_sample(cpu_buf, ibs[0], ibs[1]); 306 add_sample(cpu_buf, ibs_sample[0], ibs_sample[1]);
293 add_sample(cpu_buf, ibs[2], ibs[3]); 307 add_sample(cpu_buf, ibs_sample[2], ibs_sample[3]);
294 add_sample(cpu_buf, ibs[4], ibs[5]); 308 add_sample(cpu_buf, ibs_sample[4], ibs_sample[5]);
295 309
296 if (ibs_code == IBS_OP_BEGIN) { 310 if (ibs_code == IBS_OP_BEGIN) {
297 add_sample(cpu_buf, ibs[6], ibs[7]); 311 add_sample(cpu_buf, ibs_sample[6], ibs_sample[7]);
298 add_sample(cpu_buf, ibs[8], ibs[9]); 312 add_sample(cpu_buf, ibs_sample[8], ibs_sample[9]);
299 add_sample(cpu_buf, ibs[10], ibs[11]); 313 add_sample(cpu_buf, ibs_sample[10], ibs_sample[11]);
300 }
301
302 return 1;
303}
304
305void oprofile_add_ibs_sample(struct pt_regs *const regs,
306 unsigned int * const ibs_sample, u8 code)
307{
308 int is_kernel = !user_mode(regs);
309 unsigned long pc = profile_pc(regs);
310
311 struct oprofile_cpu_buffer *cpu_buf =
312 &per_cpu(cpu_buffer, smp_processor_id());
313
314 if (!backtrace_depth) {
315 log_ibs_sample(cpu_buf, pc, is_kernel, ibs_sample, code);
316 return;
317 } 314 }
318 315
319 /* if log_sample() fails we can't backtrace since we lost the source 316 if (backtrace_depth)
320 * of this event */
321 if (log_ibs_sample(cpu_buf, pc, is_kernel, ibs_sample, code))
322 oprofile_ops.backtrace(regs, backtrace_depth); 317 oprofile_ops.backtrace(regs, backtrace_depth);
323} 318}
324 319
@@ -363,11 +358,16 @@ void oprofile_add_trace(unsigned long pc)
363 */ 358 */
364static void wq_sync_buffer(struct work_struct *work) 359static void wq_sync_buffer(struct work_struct *work)
365{ 360{
366 struct oprofile_cpu_buffer * b = 361 struct oprofile_cpu_buffer *b =
367 container_of(work, struct oprofile_cpu_buffer, work.work); 362 container_of(work, struct oprofile_cpu_buffer, work.work);
368 if (b->cpu != smp_processor_id()) { 363 if (b->cpu != smp_processor_id()) {
369 printk(KERN_DEBUG "WQ on CPU%d, prefer CPU%d\n", 364 printk(KERN_DEBUG "WQ on CPU%d, prefer CPU%d\n",
370 smp_processor_id(), b->cpu); 365 smp_processor_id(), b->cpu);
366
367 if (!cpu_online(b->cpu)) {
368 cancel_delayed_work(&b->work);
369 return;
370 }
371 } 371 }
372 sync_buffer(b->cpu); 372 sync_buffer(b->cpu);
373 373
diff --git a/drivers/oprofile/cpu_buffer.h b/drivers/oprofile/cpu_buffer.h
index 9c44d004da69..d3cc26264db5 100644
--- a/drivers/oprofile/cpu_buffer.h
+++ b/drivers/oprofile/cpu_buffer.h
@@ -15,9 +15,9 @@
15#include <linux/workqueue.h> 15#include <linux/workqueue.h>
16#include <linux/cache.h> 16#include <linux/cache.h>
17#include <linux/sched.h> 17#include <linux/sched.h>
18 18
19struct task_struct; 19struct task_struct;
20 20
21int alloc_cpu_buffers(void); 21int alloc_cpu_buffers(void);
22void free_cpu_buffers(void); 22void free_cpu_buffers(void);
23 23
@@ -31,15 +31,15 @@ struct op_sample {
31 unsigned long eip; 31 unsigned long eip;
32 unsigned long event; 32 unsigned long event;
33}; 33};
34 34
35struct oprofile_cpu_buffer { 35struct oprofile_cpu_buffer {
36 volatile unsigned long head_pos; 36 volatile unsigned long head_pos;
37 volatile unsigned long tail_pos; 37 volatile unsigned long tail_pos;
38 unsigned long buffer_size; 38 unsigned long buffer_size;
39 struct task_struct * last_task; 39 struct task_struct *last_task;
40 int last_is_kernel; 40 int last_is_kernel;
41 int tracing; 41 int tracing;
42 struct op_sample * buffer; 42 struct op_sample *buffer;
43 unsigned long sample_received; 43 unsigned long sample_received;
44 unsigned long sample_lost_overflow; 44 unsigned long sample_lost_overflow;
45 unsigned long backtrace_aborted; 45 unsigned long backtrace_aborted;
@@ -50,7 +50,7 @@ struct oprofile_cpu_buffer {
50 50
51DECLARE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer); 51DECLARE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
52 52
53void cpu_buffer_reset(struct oprofile_cpu_buffer * cpu_buf); 53void cpu_buffer_reset(struct oprofile_cpu_buffer *cpu_buf);
54 54
55/* transient events for the CPU buffer -> event buffer */ 55/* transient events for the CPU buffer -> event buffer */
56#define CPU_IS_KERNEL 1 56#define CPU_IS_KERNEL 1
diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
index 8d692a5c8e73..d962ba0dd87a 100644
--- a/drivers/oprofile/event_buffer.c
+++ b/drivers/oprofile/event_buffer.c
@@ -19,16 +19,16 @@
19#include <linux/dcookies.h> 19#include <linux/dcookies.h>
20#include <linux/fs.h> 20#include <linux/fs.h>
21#include <asm/uaccess.h> 21#include <asm/uaccess.h>
22 22
23#include "oprof.h" 23#include "oprof.h"
24#include "event_buffer.h" 24#include "event_buffer.h"
25#include "oprofile_stats.h" 25#include "oprofile_stats.h"
26 26
27DEFINE_MUTEX(buffer_mutex); 27DEFINE_MUTEX(buffer_mutex);
28 28
29static unsigned long buffer_opened; 29static unsigned long buffer_opened;
30static DECLARE_WAIT_QUEUE_HEAD(buffer_wait); 30static DECLARE_WAIT_QUEUE_HEAD(buffer_wait);
31static unsigned long * event_buffer; 31static unsigned long *event_buffer;
32static unsigned long buffer_size; 32static unsigned long buffer_size;
33static unsigned long buffer_watershed; 33static unsigned long buffer_watershed;
34static size_t buffer_pos; 34static size_t buffer_pos;
@@ -66,7 +66,7 @@ void wake_up_buffer_waiter(void)
66 mutex_unlock(&buffer_mutex); 66 mutex_unlock(&buffer_mutex);
67} 67}
68 68
69 69
70int alloc_event_buffer(void) 70int alloc_event_buffer(void)
71{ 71{
72 int err = -ENOMEM; 72 int err = -ENOMEM;
@@ -76,13 +76,13 @@ int alloc_event_buffer(void)
76 buffer_size = fs_buffer_size; 76 buffer_size = fs_buffer_size;
77 buffer_watershed = fs_buffer_watershed; 77 buffer_watershed = fs_buffer_watershed;
78 spin_unlock_irqrestore(&oprofilefs_lock, flags); 78 spin_unlock_irqrestore(&oprofilefs_lock, flags);
79 79
80 if (buffer_watershed >= buffer_size) 80 if (buffer_watershed >= buffer_size)
81 return -EINVAL; 81 return -EINVAL;
82 82
83 event_buffer = vmalloc(sizeof(unsigned long) * buffer_size); 83 event_buffer = vmalloc(sizeof(unsigned long) * buffer_size);
84 if (!event_buffer) 84 if (!event_buffer)
85 goto out; 85 goto out;
86 86
87 err = 0; 87 err = 0;
88out: 88out:
@@ -97,8 +97,8 @@ void free_event_buffer(void)
97 event_buffer = NULL; 97 event_buffer = NULL;
98} 98}
99 99
100 100
101static int event_buffer_open(struct inode * inode, struct file * file) 101static int event_buffer_open(struct inode *inode, struct file *file)
102{ 102{
103 int err = -EPERM; 103 int err = -EPERM;
104 104
@@ -116,14 +116,14 @@ static int event_buffer_open(struct inode * inode, struct file * file)
116 file->private_data = dcookie_register(); 116 file->private_data = dcookie_register();
117 if (!file->private_data) 117 if (!file->private_data)
118 goto out; 118 goto out;
119 119
120 if ((err = oprofile_setup())) 120 if ((err = oprofile_setup()))
121 goto fail; 121 goto fail;
122 122
123 /* NB: the actual start happens from userspace 123 /* NB: the actual start happens from userspace
124 * echo 1 >/dev/oprofile/enable 124 * echo 1 >/dev/oprofile/enable
125 */ 125 */
126 126
127 return 0; 127 return 0;
128 128
129fail: 129fail:
@@ -134,7 +134,7 @@ out:
134} 134}
135 135
136 136
137static int event_buffer_release(struct inode * inode, struct file * file) 137static int event_buffer_release(struct inode *inode, struct file *file)
138{ 138{
139 oprofile_stop(); 139 oprofile_stop();
140 oprofile_shutdown(); 140 oprofile_shutdown();
@@ -146,8 +146,8 @@ static int event_buffer_release(struct inode * inode, struct file * file)
146} 146}
147 147
148 148
149static ssize_t event_buffer_read(struct file * file, char __user * buf, 149static ssize_t event_buffer_read(struct file *file, char __user *buf,
150 size_t count, loff_t * offset) 150 size_t count, loff_t *offset)
151{ 151{
152 int retval = -EINVAL; 152 int retval = -EINVAL;
153 size_t const max = buffer_size * sizeof(unsigned long); 153 size_t const max = buffer_size * sizeof(unsigned long);
@@ -172,18 +172,18 @@ static ssize_t event_buffer_read(struct file * file, char __user * buf,
172 retval = -EFAULT; 172 retval = -EFAULT;
173 173
174 count = buffer_pos * sizeof(unsigned long); 174 count = buffer_pos * sizeof(unsigned long);
175 175
176 if (copy_to_user(buf, event_buffer, count)) 176 if (copy_to_user(buf, event_buffer, count))
177 goto out; 177 goto out;
178 178
179 retval = count; 179 retval = count;
180 buffer_pos = 0; 180 buffer_pos = 0;
181 181
182out: 182out:
183 mutex_unlock(&buffer_mutex); 183 mutex_unlock(&buffer_mutex);
184 return retval; 184 return retval;
185} 185}
186 186
187const struct file_operations event_buffer_fops = { 187const struct file_operations event_buffer_fops = {
188 .open = event_buffer_open, 188 .open = event_buffer_open,
189 .release = event_buffer_release, 189 .release = event_buffer_release,
diff --git a/drivers/oprofile/event_buffer.h b/drivers/oprofile/event_buffer.h
index 5076ed1ebd8f..4e70749f8d16 100644
--- a/drivers/oprofile/event_buffer.h
+++ b/drivers/oprofile/event_buffer.h
@@ -10,13 +10,20 @@
10#ifndef EVENT_BUFFER_H 10#ifndef EVENT_BUFFER_H
11#define EVENT_BUFFER_H 11#define EVENT_BUFFER_H
12 12
13#include <linux/types.h> 13#include <linux/types.h>
14#include <asm/mutex.h> 14#include <asm/mutex.h>
15 15
16int alloc_event_buffer(void); 16int alloc_event_buffer(void);
17 17
18void free_event_buffer(void); 18void free_event_buffer(void);
19 19
20/**
21 * Add data to the event buffer.
22 * The data passed is free-form, but typically consists of
23 * file offsets, dcookies, context information, and ESCAPE codes.
24 */
25void add_event_entry(unsigned long data);
26
20/* wake up the process sleeping on the event file */ 27/* wake up the process sleeping on the event file */
21void wake_up_buffer_waiter(void); 28void wake_up_buffer_waiter(void);
22 29
@@ -24,10 +31,10 @@ void wake_up_buffer_waiter(void);
24#define NO_COOKIE 0UL 31#define NO_COOKIE 0UL
25 32
26extern const struct file_operations event_buffer_fops; 33extern const struct file_operations event_buffer_fops;
27 34
28/* mutex between sync_cpu_buffers() and the 35/* mutex between sync_cpu_buffers() and the
29 * file reading code. 36 * file reading code.
30 */ 37 */
31extern struct mutex buffer_mutex; 38extern struct mutex buffer_mutex;
32 39
33#endif /* EVENT_BUFFER_H */ 40#endif /* EVENT_BUFFER_H */
diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
index 2c645170f06e..cd375907f26f 100644
--- a/drivers/oprofile/oprof.c
+++ b/drivers/oprofile/oprof.c
@@ -19,7 +19,7 @@
19#include "cpu_buffer.h" 19#include "cpu_buffer.h"
20#include "buffer_sync.h" 20#include "buffer_sync.h"
21#include "oprofile_stats.h" 21#include "oprofile_stats.h"
22 22
23struct oprofile_operations oprofile_ops; 23struct oprofile_operations oprofile_ops;
24 24
25unsigned long oprofile_started; 25unsigned long oprofile_started;
@@ -36,7 +36,7 @@ static int timer = 0;
36int oprofile_setup(void) 36int oprofile_setup(void)
37{ 37{
38 int err; 38 int err;
39 39
40 mutex_lock(&start_mutex); 40 mutex_lock(&start_mutex);
41 41
42 if ((err = alloc_cpu_buffers())) 42 if ((err = alloc_cpu_buffers()))
@@ -44,10 +44,10 @@ int oprofile_setup(void)
44 44
45 if ((err = alloc_event_buffer())) 45 if ((err = alloc_event_buffer()))
46 goto out1; 46 goto out1;
47 47
48 if (oprofile_ops.setup && (err = oprofile_ops.setup())) 48 if (oprofile_ops.setup && (err = oprofile_ops.setup()))
49 goto out2; 49 goto out2;
50 50
51 /* Note even though this starts part of the 51 /* Note even though this starts part of the
52 * profiling overhead, it's necessary to prevent 52 * profiling overhead, it's necessary to prevent
53 * us missing task deaths and eventually oopsing 53 * us missing task deaths and eventually oopsing
@@ -74,7 +74,7 @@ post_sync:
74 is_setup = 1; 74 is_setup = 1;
75 mutex_unlock(&start_mutex); 75 mutex_unlock(&start_mutex);
76 return 0; 76 return 0;
77 77
78out3: 78out3:
79 if (oprofile_ops.shutdown) 79 if (oprofile_ops.shutdown)
80 oprofile_ops.shutdown(); 80 oprofile_ops.shutdown();
@@ -92,17 +92,17 @@ out:
92int oprofile_start(void) 92int oprofile_start(void)
93{ 93{
94 int err = -EINVAL; 94 int err = -EINVAL;
95 95
96 mutex_lock(&start_mutex); 96 mutex_lock(&start_mutex);
97 97
98 if (!is_setup) 98 if (!is_setup)
99 goto out; 99 goto out;
100 100
101 err = 0; 101 err = 0;
102 102
103 if (oprofile_started) 103 if (oprofile_started)
104 goto out; 104 goto out;
105 105
106 oprofile_reset_stats(); 106 oprofile_reset_stats();
107 107
108 if ((err = oprofile_ops.start())) 108 if ((err = oprofile_ops.start()))
@@ -114,7 +114,7 @@ out:
114 return err; 114 return err;
115} 115}
116 116
117 117
118/* echo 0>/dev/oprofile/enable */ 118/* echo 0>/dev/oprofile/enable */
119void oprofile_stop(void) 119void oprofile_stop(void)
120{ 120{
@@ -204,13 +204,13 @@ static void __exit oprofile_exit(void)
204 oprofile_arch_exit(); 204 oprofile_arch_exit();
205} 205}
206 206
207 207
208module_init(oprofile_init); 208module_init(oprofile_init);
209module_exit(oprofile_exit); 209module_exit(oprofile_exit);
210 210
211module_param_named(timer, timer, int, 0644); 211module_param_named(timer, timer, int, 0644);
212MODULE_PARM_DESC(timer, "force use of timer interrupt"); 212MODULE_PARM_DESC(timer, "force use of timer interrupt");
213 213
214MODULE_LICENSE("GPL"); 214MODULE_LICENSE("GPL");
215MODULE_AUTHOR("John Levon <levon@movementarian.org>"); 215MODULE_AUTHOR("John Levon <levon@movementarian.org>");
216MODULE_DESCRIPTION("OProfile system profiler"); 216MODULE_DESCRIPTION("OProfile system profiler");
diff --git a/drivers/oprofile/oprof.h b/drivers/oprofile/oprof.h
index 18323650806e..5df0c21a608f 100644
--- a/drivers/oprofile/oprof.h
+++ b/drivers/oprofile/oprof.h
@@ -11,7 +11,7 @@
11#define OPROF_H 11#define OPROF_H
12 12
13int oprofile_setup(void); 13int oprofile_setup(void);
14void oprofile_shutdown(void); 14void oprofile_shutdown(void);
15 15
16int oprofilefs_register(void); 16int oprofilefs_register(void);
17void oprofilefs_unregister(void); 17void oprofilefs_unregister(void);
@@ -20,20 +20,20 @@ int oprofile_start(void);
20void oprofile_stop(void); 20void oprofile_stop(void);
21 21
22struct oprofile_operations; 22struct oprofile_operations;
23 23
24extern unsigned long fs_buffer_size; 24extern unsigned long fs_buffer_size;
25extern unsigned long fs_cpu_buffer_size; 25extern unsigned long fs_cpu_buffer_size;
26extern unsigned long fs_buffer_watershed; 26extern unsigned long fs_buffer_watershed;
27extern struct oprofile_operations oprofile_ops; 27extern struct oprofile_operations oprofile_ops;
28extern unsigned long oprofile_started; 28extern unsigned long oprofile_started;
29extern unsigned long backtrace_depth; 29extern unsigned long backtrace_depth;
30 30
31struct super_block; 31struct super_block;
32struct dentry; 32struct dentry;
33 33
34void oprofile_create_files(struct super_block * sb, struct dentry * root); 34void oprofile_create_files(struct super_block *sb, struct dentry *root);
35void oprofile_timer_init(struct oprofile_operations * ops); 35void oprofile_timer_init(struct oprofile_operations *ops);
36 36
37int oprofile_set_backtrace(unsigned long depth); 37int oprofile_set_backtrace(unsigned long depth);
38 38
39#endif /* OPROF_H */ 39#endif /* OPROF_H */
diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
index ef953ba5ab6b..cc106d503ace 100644
--- a/drivers/oprofile/oprofile_files.c
+++ b/drivers/oprofile/oprofile_files.c
@@ -13,18 +13,18 @@
13#include "event_buffer.h" 13#include "event_buffer.h"
14#include "oprofile_stats.h" 14#include "oprofile_stats.h"
15#include "oprof.h" 15#include "oprof.h"
16 16
17unsigned long fs_buffer_size = 131072; 17unsigned long fs_buffer_size = 131072;
18unsigned long fs_cpu_buffer_size = 8192; 18unsigned long fs_cpu_buffer_size = 8192;
19unsigned long fs_buffer_watershed = 32768; /* FIXME: tune */ 19unsigned long fs_buffer_watershed = 32768; /* FIXME: tune */
20 20
21static ssize_t depth_read(struct file * file, char __user * buf, size_t count, loff_t * offset) 21static ssize_t depth_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
22{ 22{
23 return oprofilefs_ulong_to_user(backtrace_depth, buf, count, offset); 23 return oprofilefs_ulong_to_user(backtrace_depth, buf, count, offset);
24} 24}
25 25
26 26
27static ssize_t depth_write(struct file * file, char const __user * buf, size_t count, loff_t * offset) 27static ssize_t depth_write(struct file *file, char const __user *buf, size_t count, loff_t *offset)
28{ 28{
29 unsigned long val; 29 unsigned long val;
30 int retval; 30 int retval;
@@ -49,8 +49,8 @@ static const struct file_operations depth_fops = {
49 .write = depth_write 49 .write = depth_write
50}; 50};
51 51
52 52
53static ssize_t pointer_size_read(struct file * file, char __user * buf, size_t count, loff_t * offset) 53static ssize_t pointer_size_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
54{ 54{
55 return oprofilefs_ulong_to_user(sizeof(void *), buf, count, offset); 55 return oprofilefs_ulong_to_user(sizeof(void *), buf, count, offset);
56} 56}
@@ -61,24 +61,24 @@ static const struct file_operations pointer_size_fops = {
61}; 61};
62 62
63 63
64static ssize_t cpu_type_read(struct file * file, char __user * buf, size_t count, loff_t * offset) 64static ssize_t cpu_type_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
65{ 65{
66 return oprofilefs_str_to_user(oprofile_ops.cpu_type, buf, count, offset); 66 return oprofilefs_str_to_user(oprofile_ops.cpu_type, buf, count, offset);
67} 67}
68 68
69 69
70static const struct file_operations cpu_type_fops = { 70static const struct file_operations cpu_type_fops = {
71 .read = cpu_type_read, 71 .read = cpu_type_read,
72}; 72};
73 73
74 74
75static ssize_t enable_read(struct file * file, char __user * buf, size_t count, loff_t * offset) 75static ssize_t enable_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
76{ 76{
77 return oprofilefs_ulong_to_user(oprofile_started, buf, count, offset); 77 return oprofilefs_ulong_to_user(oprofile_started, buf, count, offset);
78} 78}
79 79
80 80
81static ssize_t enable_write(struct file * file, char const __user * buf, size_t count, loff_t * offset) 81static ssize_t enable_write(struct file *file, char const __user *buf, size_t count, loff_t *offset)
82{ 82{
83 unsigned long val; 83 unsigned long val;
84 int retval; 84 int retval;
@@ -89,7 +89,7 @@ static ssize_t enable_write(struct file * file, char const __user * buf, size_t
89 retval = oprofilefs_ulong_from_user(&val, buf, count); 89 retval = oprofilefs_ulong_from_user(&val, buf, count);
90 if (retval) 90 if (retval)
91 return retval; 91 return retval;
92 92
93 if (val) 93 if (val)
94 retval = oprofile_start(); 94 retval = oprofile_start();
95 else 95 else
@@ -100,14 +100,14 @@ static ssize_t enable_write(struct file * file, char const __user * buf, size_t
100 return count; 100 return count;
101} 101}
102 102
103 103
104static const struct file_operations enable_fops = { 104static const struct file_operations enable_fops = {
105 .read = enable_read, 105 .read = enable_read,
106 .write = enable_write, 106 .write = enable_write,
107}; 107};
108 108
109 109
110static ssize_t dump_write(struct file * file, char const __user * buf, size_t count, loff_t * offset) 110static ssize_t dump_write(struct file *file, char const __user *buf, size_t count, loff_t *offset)
111{ 111{
112 wake_up_buffer_waiter(); 112 wake_up_buffer_waiter();
113 return count; 113 return count;
@@ -117,8 +117,8 @@ static ssize_t dump_write(struct file * file, char const __user * buf, size_t co
117static const struct file_operations dump_fops = { 117static const struct file_operations dump_fops = {
118 .write = dump_write, 118 .write = dump_write,
119}; 119};
120 120
121void oprofile_create_files(struct super_block * sb, struct dentry * root) 121void oprofile_create_files(struct super_block *sb, struct dentry *root)
122{ 122{
123 oprofilefs_create_file(sb, root, "enable", &enable_fops); 123 oprofilefs_create_file(sb, root, "enable", &enable_fops);
124 oprofilefs_create_file_perm(sb, root, "dump", &dump_fops, 0666); 124 oprofilefs_create_file_perm(sb, root, "dump", &dump_fops, 0666);
@@ -126,7 +126,7 @@ void oprofile_create_files(struct super_block * sb, struct dentry * root)
126 oprofilefs_create_ulong(sb, root, "buffer_size", &fs_buffer_size); 126 oprofilefs_create_ulong(sb, root, "buffer_size", &fs_buffer_size);
127 oprofilefs_create_ulong(sb, root, "buffer_watershed", &fs_buffer_watershed); 127 oprofilefs_create_ulong(sb, root, "buffer_watershed", &fs_buffer_watershed);
128 oprofilefs_create_ulong(sb, root, "cpu_buffer_size", &fs_cpu_buffer_size); 128 oprofilefs_create_ulong(sb, root, "cpu_buffer_size", &fs_cpu_buffer_size);
129 oprofilefs_create_file(sb, root, "cpu_type", &cpu_type_fops); 129 oprofilefs_create_file(sb, root, "cpu_type", &cpu_type_fops);
130 oprofilefs_create_file(sb, root, "backtrace_depth", &depth_fops); 130 oprofilefs_create_file(sb, root, "backtrace_depth", &depth_fops);
131 oprofilefs_create_file(sb, root, "pointer_size", &pointer_size_fops); 131 oprofilefs_create_file(sb, root, "pointer_size", &pointer_size_fops);
132 oprofile_create_stats_files(sb, root); 132 oprofile_create_stats_files(sb, root);
diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
index f99b28e7b79a..e1f6ce03705e 100644
--- a/drivers/oprofile/oprofile_stats.c
+++ b/drivers/oprofile/oprofile_stats.c
@@ -11,17 +11,17 @@
11#include <linux/smp.h> 11#include <linux/smp.h>
12#include <linux/cpumask.h> 12#include <linux/cpumask.h>
13#include <linux/threads.h> 13#include <linux/threads.h>
14 14
15#include "oprofile_stats.h" 15#include "oprofile_stats.h"
16#include "cpu_buffer.h" 16#include "cpu_buffer.h"
17 17
18struct oprofile_stat_struct oprofile_stats; 18struct oprofile_stat_struct oprofile_stats;
19 19
20void oprofile_reset_stats(void) 20void oprofile_reset_stats(void)
21{ 21{
22 struct oprofile_cpu_buffer * cpu_buf; 22 struct oprofile_cpu_buffer *cpu_buf;
23 int i; 23 int i;
24 24
25 for_each_possible_cpu(i) { 25 for_each_possible_cpu(i) {
26 cpu_buf = &per_cpu(cpu_buffer, i); 26 cpu_buf = &per_cpu(cpu_buffer, i);
27 cpu_buf->sample_received = 0; 27 cpu_buf->sample_received = 0;
@@ -29,18 +29,18 @@ void oprofile_reset_stats(void)
29 cpu_buf->backtrace_aborted = 0; 29 cpu_buf->backtrace_aborted = 0;
30 cpu_buf->sample_invalid_eip = 0; 30 cpu_buf->sample_invalid_eip = 0;
31 } 31 }
32 32
33 atomic_set(&oprofile_stats.sample_lost_no_mm, 0); 33 atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
34 atomic_set(&oprofile_stats.sample_lost_no_mapping, 0); 34 atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
35 atomic_set(&oprofile_stats.event_lost_overflow, 0); 35 atomic_set(&oprofile_stats.event_lost_overflow, 0);
36} 36}
37 37
38 38
39void oprofile_create_stats_files(struct super_block * sb, struct dentry * root) 39void oprofile_create_stats_files(struct super_block *sb, struct dentry *root)
40{ 40{
41 struct oprofile_cpu_buffer * cpu_buf; 41 struct oprofile_cpu_buffer *cpu_buf;
42 struct dentry * cpudir; 42 struct dentry *cpudir;
43 struct dentry * dir; 43 struct dentry *dir;
44 char buf[10]; 44 char buf[10];
45 int i; 45 int i;
46 46
@@ -52,7 +52,7 @@ void oprofile_create_stats_files(struct super_block * sb, struct dentry * root)
52 cpu_buf = &per_cpu(cpu_buffer, i); 52 cpu_buf = &per_cpu(cpu_buffer, i);
53 snprintf(buf, 10, "cpu%d", i); 53 snprintf(buf, 10, "cpu%d", i);
54 cpudir = oprofilefs_mkdir(sb, dir, buf); 54 cpudir = oprofilefs_mkdir(sb, dir, buf);
55 55
56 /* Strictly speaking access to these ulongs is racy, 56 /* Strictly speaking access to these ulongs is racy,
57 * but we can't simply lock them, and they are 57 * but we can't simply lock them, and they are
58 * informational only. 58 * informational only.
@@ -66,7 +66,7 @@ void oprofile_create_stats_files(struct super_block * sb, struct dentry * root)
66 oprofilefs_create_ro_ulong(sb, cpudir, "sample_invalid_eip", 66 oprofilefs_create_ro_ulong(sb, cpudir, "sample_invalid_eip",
67 &cpu_buf->sample_invalid_eip); 67 &cpu_buf->sample_invalid_eip);
68 } 68 }
69 69
70 oprofilefs_create_ro_atomic(sb, dir, "sample_lost_no_mm", 70 oprofilefs_create_ro_atomic(sb, dir, "sample_lost_no_mm",
71 &oprofile_stats.sample_lost_no_mm); 71 &oprofile_stats.sample_lost_no_mm);
72 oprofilefs_create_ro_atomic(sb, dir, "sample_lost_no_mapping", 72 oprofilefs_create_ro_atomic(sb, dir, "sample_lost_no_mapping",
diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
index 6d755a633f15..3da0d08dc1f9 100644
--- a/drivers/oprofile/oprofile_stats.h
+++ b/drivers/oprofile/oprofile_stats.h
@@ -11,7 +11,7 @@
11#define OPROFILE_STATS_H 11#define OPROFILE_STATS_H
12 12
13#include <asm/atomic.h> 13#include <asm/atomic.h>
14 14
15struct oprofile_stat_struct { 15struct oprofile_stat_struct {
16 atomic_t sample_lost_no_mm; 16 atomic_t sample_lost_no_mm;
17 atomic_t sample_lost_no_mapping; 17 atomic_t sample_lost_no_mapping;
@@ -20,14 +20,14 @@ struct oprofile_stat_struct {
20}; 20};
21 21
22extern struct oprofile_stat_struct oprofile_stats; 22extern struct oprofile_stat_struct oprofile_stats;
23 23
24/* reset all stats to zero */ 24/* reset all stats to zero */
25void oprofile_reset_stats(void); 25void oprofile_reset_stats(void);
26 26
27struct super_block; 27struct super_block;
28struct dentry; 28struct dentry;
29 29
30/* create the stats/ dir */ 30/* create the stats/ dir */
31void oprofile_create_stats_files(struct super_block * sb, struct dentry * root); 31void oprofile_create_stats_files(struct super_block *sb, struct dentry *root);
32 32
33#endif /* OPROFILE_STATS_H */ 33#endif /* OPROFILE_STATS_H */
diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
index 8543cb26cf34..ddc4c59f02dc 100644
--- a/drivers/oprofile/oprofilefs.c
+++ b/drivers/oprofile/oprofilefs.c
@@ -23,9 +23,9 @@
23 23
24DEFINE_SPINLOCK(oprofilefs_lock); 24DEFINE_SPINLOCK(oprofilefs_lock);
25 25
26static struct inode * oprofilefs_get_inode(struct super_block * sb, int mode) 26static struct inode *oprofilefs_get_inode(struct super_block *sb, int mode)
27{ 27{
28 struct inode * inode = new_inode(sb); 28 struct inode *inode = new_inode(sb);
29 29
30 if (inode) { 30 if (inode) {
31 inode->i_mode = mode; 31 inode->i_mode = mode;
@@ -44,7 +44,7 @@ static struct super_operations s_ops = {
44}; 44};
45 45
46 46
47ssize_t oprofilefs_str_to_user(char const * str, char __user * buf, size_t count, loff_t * offset) 47ssize_t oprofilefs_str_to_user(char const *str, char __user *buf, size_t count, loff_t *offset)
48{ 48{
49 return simple_read_from_buffer(buf, count, offset, str, strlen(str)); 49 return simple_read_from_buffer(buf, count, offset, str, strlen(str));
50} 50}
@@ -52,7 +52,7 @@ ssize_t oprofilefs_str_to_user(char const * str, char __user * buf, size_t count
52 52
53#define TMPBUFSIZE 50 53#define TMPBUFSIZE 50
54 54
55ssize_t oprofilefs_ulong_to_user(unsigned long val, char __user * buf, size_t count, loff_t * offset) 55ssize_t oprofilefs_ulong_to_user(unsigned long val, char __user *buf, size_t count, loff_t *offset)
56{ 56{
57 char tmpbuf[TMPBUFSIZE]; 57 char tmpbuf[TMPBUFSIZE];
58 size_t maxlen = snprintf(tmpbuf, TMPBUFSIZE, "%lu\n", val); 58 size_t maxlen = snprintf(tmpbuf, TMPBUFSIZE, "%lu\n", val);
@@ -62,7 +62,7 @@ ssize_t oprofilefs_ulong_to_user(unsigned long val, char __user * buf, size_t co
62} 62}
63 63
64 64
65int oprofilefs_ulong_from_user(unsigned long * val, char const __user * buf, size_t count) 65int oprofilefs_ulong_from_user(unsigned long *val, char const __user *buf, size_t count)
66{ 66{
67 char tmpbuf[TMPBUFSIZE]; 67 char tmpbuf[TMPBUFSIZE];
68 unsigned long flags; 68 unsigned long flags;
@@ -85,16 +85,16 @@ int oprofilefs_ulong_from_user(unsigned long * val, char const __user * buf, siz
85} 85}
86 86
87 87
88static ssize_t ulong_read_file(struct file * file, char __user * buf, size_t count, loff_t * offset) 88static ssize_t ulong_read_file(struct file *file, char __user *buf, size_t count, loff_t *offset)
89{ 89{
90 unsigned long * val = file->private_data; 90 unsigned long *val = file->private_data;
91 return oprofilefs_ulong_to_user(*val, buf, count, offset); 91 return oprofilefs_ulong_to_user(*val, buf, count, offset);
92} 92}
93 93
94 94
95static ssize_t ulong_write_file(struct file * file, char const __user * buf, size_t count, loff_t * offset) 95static ssize_t ulong_write_file(struct file *file, char const __user *buf, size_t count, loff_t *offset)
96{ 96{
97 unsigned long * value = file->private_data; 97 unsigned long *value = file->private_data;
98 int retval; 98 int retval;
99 99
100 if (*offset) 100 if (*offset)
@@ -108,7 +108,7 @@ static ssize_t ulong_write_file(struct file * file, char const __user * buf, siz
108} 108}
109 109
110 110
111static int default_open(struct inode * inode, struct file * filp) 111static int default_open(struct inode *inode, struct file *filp)
112{ 112{
113 if (inode->i_private) 113 if (inode->i_private)
114 filp->private_data = inode->i_private; 114 filp->private_data = inode->i_private;
@@ -129,12 +129,12 @@ static const struct file_operations ulong_ro_fops = {
129}; 129};
130 130
131 131
132static struct dentry * __oprofilefs_create_file(struct super_block * sb, 132static struct dentry *__oprofilefs_create_file(struct super_block *sb,
133 struct dentry * root, char const * name, const struct file_operations * fops, 133 struct dentry *root, char const *name, const struct file_operations *fops,
134 int perm) 134 int perm)
135{ 135{
136 struct dentry * dentry; 136 struct dentry *dentry;
137 struct inode * inode; 137 struct inode *inode;
138 138
139 dentry = d_alloc_name(root, name); 139 dentry = d_alloc_name(root, name);
140 if (!dentry) 140 if (!dentry)
@@ -150,10 +150,10 @@ static struct dentry * __oprofilefs_create_file(struct super_block * sb,
150} 150}
151 151
152 152
153int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root, 153int oprofilefs_create_ulong(struct super_block *sb, struct dentry *root,
154 char const * name, unsigned long * val) 154 char const *name, unsigned long *val)
155{ 155{
156 struct dentry * d = __oprofilefs_create_file(sb, root, name, 156 struct dentry *d = __oprofilefs_create_file(sb, root, name,
157 &ulong_fops, 0644); 157 &ulong_fops, 0644);
158 if (!d) 158 if (!d)
159 return -EFAULT; 159 return -EFAULT;
@@ -163,10 +163,10 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
163} 163}
164 164
165 165
166int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root, 166int oprofilefs_create_ro_ulong(struct super_block *sb, struct dentry *root,
167 char const * name, unsigned long * val) 167 char const *name, unsigned long *val)
168{ 168{
169 struct dentry * d = __oprofilefs_create_file(sb, root, name, 169 struct dentry *d = __oprofilefs_create_file(sb, root, name,
170 &ulong_ro_fops, 0444); 170 &ulong_ro_fops, 0444);
171 if (!d) 171 if (!d)
172 return -EFAULT; 172 return -EFAULT;
@@ -176,23 +176,23 @@ int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
176} 176}
177 177
178 178
179static ssize_t atomic_read_file(struct file * file, char __user * buf, size_t count, loff_t * offset) 179static ssize_t atomic_read_file(struct file *file, char __user *buf, size_t count, loff_t *offset)
180{ 180{
181 atomic_t * val = file->private_data; 181 atomic_t *val = file->private_data;
182 return oprofilefs_ulong_to_user(atomic_read(val), buf, count, offset); 182 return oprofilefs_ulong_to_user(atomic_read(val), buf, count, offset);
183} 183}
184 184
185 185
186static const struct file_operations atomic_ro_fops = { 186static const struct file_operations atomic_ro_fops = {
187 .read = atomic_read_file, 187 .read = atomic_read_file,
188 .open = default_open, 188 .open = default_open,
189}; 189};
190
191 190
192int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root, 191
193 char const * name, atomic_t * val) 192int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
193 char const *name, atomic_t *val)
194{ 194{
195 struct dentry * d = __oprofilefs_create_file(sb, root, name, 195 struct dentry *d = __oprofilefs_create_file(sb, root, name,
196 &atomic_ro_fops, 0444); 196 &atomic_ro_fops, 0444);
197 if (!d) 197 if (!d)
198 return -EFAULT; 198 return -EFAULT;
@@ -201,9 +201,9 @@ int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
201 return 0; 201 return 0;
202} 202}
203 203
204 204
205int oprofilefs_create_file(struct super_block * sb, struct dentry * root, 205int oprofilefs_create_file(struct super_block *sb, struct dentry *root,
206 char const * name, const struct file_operations * fops) 206 char const *name, const struct file_operations *fops)
207{ 207{
208 if (!__oprofilefs_create_file(sb, root, name, fops, 0644)) 208 if (!__oprofilefs_create_file(sb, root, name, fops, 0644))
209 return -EFAULT; 209 return -EFAULT;
@@ -211,8 +211,8 @@ int oprofilefs_create_file(struct super_block * sb, struct dentry * root,
211} 211}
212 212
213 213
214int oprofilefs_create_file_perm(struct super_block * sb, struct dentry * root, 214int oprofilefs_create_file_perm(struct super_block *sb, struct dentry *root,
215 char const * name, const struct file_operations * fops, int perm) 215 char const *name, const struct file_operations *fops, int perm)
216{ 216{
217 if (!__oprofilefs_create_file(sb, root, name, fops, perm)) 217 if (!__oprofilefs_create_file(sb, root, name, fops, perm))
218 return -EFAULT; 218 return -EFAULT;
@@ -220,11 +220,11 @@ int oprofilefs_create_file_perm(struct super_block * sb, struct dentry * root,
220} 220}
221 221
222 222
223struct dentry * oprofilefs_mkdir(struct super_block * sb, 223struct dentry *oprofilefs_mkdir(struct super_block *sb,
224 struct dentry * root, char const * name) 224 struct dentry *root, char const *name)
225{ 225{
226 struct dentry * dentry; 226 struct dentry *dentry;
227 struct inode * inode; 227 struct inode *inode;
228 228
229 dentry = d_alloc_name(root, name); 229 dentry = d_alloc_name(root, name);
230 if (!dentry) 230 if (!dentry)
@@ -241,10 +241,10 @@ struct dentry * oprofilefs_mkdir(struct super_block * sb,
241} 241}
242 242
243 243
244static int oprofilefs_fill_super(struct super_block * sb, void * data, int silent) 244static int oprofilefs_fill_super(struct super_block *sb, void *data, int silent)
245{ 245{
246 struct inode * root_inode; 246 struct inode *root_inode;
247 struct dentry * root_dentry; 247 struct dentry *root_dentry;
248 248
249 sb->s_blocksize = PAGE_CACHE_SIZE; 249 sb->s_blocksize = PAGE_CACHE_SIZE;
250 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 250 sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
index 710a45f0d734..333f915568c7 100644
--- a/drivers/oprofile/timer_int.c
+++ b/drivers/oprofile/timer_int.c
@@ -19,7 +19,7 @@
19 19
20static int timer_notify(struct pt_regs *regs) 20static int timer_notify(struct pt_regs *regs)
21{ 21{
22 oprofile_add_sample(regs, 0); 22 oprofile_add_sample(regs, 0);
23 return 0; 23 return 0;
24} 24}
25 25
@@ -35,7 +35,7 @@ static void timer_stop(void)
35} 35}
36 36
37 37
38void __init oprofile_timer_init(struct oprofile_operations * ops) 38void __init oprofile_timer_init(struct oprofile_operations *ops)
39{ 39{
40 ops->create_files = NULL; 40 ops->create_files = NULL;
41 ops->setup = NULL; 41 ops->setup = NULL;
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index 8a846adf1dcf..96f3bdf0ec4b 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -2791,6 +2791,7 @@ enum parport_pc_pci_cards {
2791 oxsemi_952, 2791 oxsemi_952,
2792 oxsemi_954, 2792 oxsemi_954,
2793 oxsemi_840, 2793 oxsemi_840,
2794 oxsemi_pcie_pport,
2794 aks_0100, 2795 aks_0100,
2795 mobility_pp, 2796 mobility_pp,
2796 netmos_9705, 2797 netmos_9705,
@@ -2868,6 +2869,7 @@ static struct parport_pc_pci {
2868 /* oxsemi_952 */ { 1, { { 0, 1 }, } }, 2869 /* oxsemi_952 */ { 1, { { 0, 1 }, } },
2869 /* oxsemi_954 */ { 1, { { 0, -1 }, } }, 2870 /* oxsemi_954 */ { 1, { { 0, -1 }, } },
2870 /* oxsemi_840 */ { 1, { { 0, 1 }, } }, 2871 /* oxsemi_840 */ { 1, { { 0, 1 }, } },
2872 /* oxsemi_pcie_pport */ { 1, { { 0, 1 }, } },
2871 /* aks_0100 */ { 1, { { 0, -1 }, } }, 2873 /* aks_0100 */ { 1, { { 0, -1 }, } },
2872 /* mobility_pp */ { 1, { { 0, 1 }, } }, 2874 /* mobility_pp */ { 1, { { 0, 1 }, } },
2873 /* netmos_9705 */ { 1, { { 0, -1 }, } }, /* untested */ 2875 /* netmos_9705 */ { 1, { { 0, -1 }, } }, /* untested */
@@ -2928,7 +2930,6 @@ static const struct pci_device_id parport_pc_pci_tbl[] = {
2928 { 0x1409, 0x7268, 0x1409, 0x0103, 0, 0, timedia_4008a }, 2930 { 0x1409, 0x7268, 0x1409, 0x0103, 0, 0, timedia_4008a },
2929 { 0x1409, 0x7268, 0x1409, 0x0104, 0, 0, timedia_4018 }, 2931 { 0x1409, 0x7268, 0x1409, 0x0104, 0, 0, timedia_4018 },
2930 { 0x1409, 0x7268, 0x1409, 0x9018, 0, 0, timedia_9018a }, 2932 { 0x1409, 0x7268, 0x1409, 0x9018, 0, 0, timedia_9018a },
2931 { 0x14f2, 0x0121, PCI_ANY_ID, PCI_ANY_ID, 0, 0, mobility_pp },
2932 { PCI_VENDOR_ID_SYBA, PCI_DEVICE_ID_SYBA_2P_EPP, 2933 { PCI_VENDOR_ID_SYBA, PCI_DEVICE_ID_SYBA_2P_EPP,
2933 PCI_ANY_ID, PCI_ANY_ID, 0, 0, syba_2p_epp }, 2934 PCI_ANY_ID, PCI_ANY_ID, 0, 0, syba_2p_epp },
2934 { PCI_VENDOR_ID_SYBA, PCI_DEVICE_ID_SYBA_1P_ECP, 2935 { PCI_VENDOR_ID_SYBA, PCI_DEVICE_ID_SYBA_1P_ECP,
@@ -2946,8 +2947,25 @@ static const struct pci_device_id parport_pc_pci_tbl[] = {
2946 PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_954 }, 2947 PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_954 },
2947 { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_12PCI840, 2948 { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_12PCI840,
2948 PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_840 }, 2949 PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_840 },
2950 { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_PCIe840,
2951 PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
2952 { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_PCIe840_G,
2953 PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
2954 { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_PCIe952_0,
2955 PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
2956 { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_PCIe952_0_G,
2957 PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
2958 { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_PCIe952_1,
2959 PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
2960 { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_PCIe952_1_G,
2961 PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
2962 { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_PCIe952_1_U,
2963 PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
2964 { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_PCIe952_1_GU,
2965 PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport },
2949 { PCI_VENDOR_ID_AKS, PCI_DEVICE_ID_AKS_ALADDINCARD, 2966 { PCI_VENDOR_ID_AKS, PCI_DEVICE_ID_AKS_ALADDINCARD,
2950 PCI_ANY_ID, PCI_ANY_ID, 0, 0, aks_0100 }, 2967 PCI_ANY_ID, PCI_ANY_ID, 0, 0, aks_0100 },
2968 { 0x14f2, 0x0121, PCI_ANY_ID, PCI_ANY_ID, 0, 0, mobility_pp },
2951 /* NetMos communication controllers */ 2969 /* NetMos communication controllers */
2952 { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9705, 2970 { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9705,
2953 PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9705 }, 2971 PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9705 },
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index 8b29c307f1a1..691b3adeb870 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -188,8 +188,7 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header)
188 return 0; 188 return 0;
189} 189}
190 190
191static int __init 191static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
192dmar_parse_dev(struct dmar_drhd_unit *dmaru)
193{ 192{
194 struct acpi_dmar_hardware_unit *drhd; 193 struct acpi_dmar_hardware_unit *drhd;
195 static int include_all; 194 static int include_all;
@@ -277,14 +276,15 @@ dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
277 drhd = (struct acpi_dmar_hardware_unit *)header; 276 drhd = (struct acpi_dmar_hardware_unit *)header;
278 printk (KERN_INFO PREFIX 277 printk (KERN_INFO PREFIX
279 "DRHD (flags: 0x%08x)base: 0x%016Lx\n", 278 "DRHD (flags: 0x%08x)base: 0x%016Lx\n",
280 drhd->flags, drhd->address); 279 drhd->flags, (unsigned long long)drhd->address);
281 break; 280 break;
282 case ACPI_DMAR_TYPE_RESERVED_MEMORY: 281 case ACPI_DMAR_TYPE_RESERVED_MEMORY:
283 rmrr = (struct acpi_dmar_reserved_memory *)header; 282 rmrr = (struct acpi_dmar_reserved_memory *)header;
284 283
285 printk (KERN_INFO PREFIX 284 printk (KERN_INFO PREFIX
286 "RMRR base: 0x%016Lx end: 0x%016Lx\n", 285 "RMRR base: 0x%016Lx end: 0x%016Lx\n",
287 rmrr->base_address, rmrr->end_address); 286 (unsigned long long)rmrr->base_address,
287 (unsigned long long)rmrr->end_address);
288 break; 288 break;
289 } 289 }
290} 290}
@@ -328,7 +328,7 @@ parse_dmar_table(void)
328 if (!dmar) 328 if (!dmar)
329 return -ENODEV; 329 return -ENODEV;
330 330
331 if (dmar->width < PAGE_SHIFT_4K - 1) { 331 if (dmar->width < PAGE_SHIFT - 1) {
332 printk(KERN_WARNING PREFIX "Invalid DMAR haw\n"); 332 printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
333 return -EINVAL; 333 return -EINVAL;
334 } 334 }
@@ -460,8 +460,8 @@ void __init detect_intel_iommu(void)
460 460
461 ret = dmar_table_detect(); 461 ret = dmar_table_detect();
462 462
463#ifdef CONFIG_DMAR
464 { 463 {
464#ifdef CONFIG_INTR_REMAP
465 struct acpi_table_dmar *dmar; 465 struct acpi_table_dmar *dmar;
466 /* 466 /*
467 * for now we will disable dma-remapping when interrupt 467 * for now we will disable dma-remapping when interrupt
@@ -470,29 +470,17 @@ void __init detect_intel_iommu(void)
470 * is added, we will not need this any more. 470 * is added, we will not need this any more.
471 */ 471 */
472 dmar = (struct acpi_table_dmar *) dmar_tbl; 472 dmar = (struct acpi_table_dmar *) dmar_tbl;
473 if (ret && cpu_has_x2apic && dmar->flags & 0x1) { 473 if (ret && cpu_has_x2apic && dmar->flags & 0x1)
474 printk(KERN_INFO 474 printk(KERN_INFO
475 "Queued invalidation will be enabled to support " 475 "Queued invalidation will be enabled to support "
476 "x2apic and Intr-remapping.\n"); 476 "x2apic and Intr-remapping.\n");
477 printk(KERN_INFO 477#endif
478 "Disabling IOMMU detection, because of missing " 478#ifdef CONFIG_DMAR
479 "queued invalidation support for IOTLB "
480 "invalidation\n");
481 printk(KERN_INFO
482 "Use \"nox2apic\", if you want to use Intel "
483 " IOMMU for DMA-remapping and don't care about "
484 " x2apic support\n");
485
486 dmar_disabled = 1;
487 goto end;
488 }
489
490 if (ret && !no_iommu && !iommu_detected && !swiotlb && 479 if (ret && !no_iommu && !iommu_detected && !swiotlb &&
491 !dmar_disabled) 480 !dmar_disabled)
492 iommu_detected = 1; 481 iommu_detected = 1;
493 }
494end:
495#endif 482#endif
483 }
496 dmar_tbl = NULL; 484 dmar_tbl = NULL;
497} 485}
498 486
@@ -510,7 +498,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
510 498
511 iommu->seq_id = iommu_allocated++; 499 iommu->seq_id = iommu_allocated++;
512 500
513 iommu->reg = ioremap(drhd->reg_base_addr, PAGE_SIZE_4K); 501 iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
514 if (!iommu->reg) { 502 if (!iommu->reg) {
515 printk(KERN_ERR "IOMMU: can't map the region\n"); 503 printk(KERN_ERR "IOMMU: can't map the region\n");
516 goto error; 504 goto error;
@@ -521,8 +509,8 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
521 /* the registers might be more than one page */ 509 /* the registers might be more than one page */
522 map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap), 510 map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
523 cap_max_fault_reg_offset(iommu->cap)); 511 cap_max_fault_reg_offset(iommu->cap));
524 map_size = PAGE_ALIGN_4K(map_size); 512 map_size = VTD_PAGE_ALIGN(map_size);
525 if (map_size > PAGE_SIZE_4K) { 513 if (map_size > VTD_PAGE_SIZE) {
526 iounmap(iommu->reg); 514 iounmap(iommu->reg);
527 iommu->reg = ioremap(drhd->reg_base_addr, map_size); 515 iommu->reg = ioremap(drhd->reg_base_addr, map_size);
528 if (!iommu->reg) { 516 if (!iommu->reg) {
@@ -533,8 +521,10 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
533 521
534 ver = readl(iommu->reg + DMAR_VER_REG); 522 ver = readl(iommu->reg + DMAR_VER_REG);
535 pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n", 523 pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
536 drhd->reg_base_addr, DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver), 524 (unsigned long long)drhd->reg_base_addr,
537 iommu->cap, iommu->ecap); 525 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
526 (unsigned long long)iommu->cap,
527 (unsigned long long)iommu->ecap);
538 528
539 spin_lock_init(&iommu->register_lock); 529 spin_lock_init(&iommu->register_lock);
540 530
@@ -587,11 +577,11 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
587 577
588 hw = qi->desc; 578 hw = qi->desc;
589 579
590 spin_lock(&qi->q_lock); 580 spin_lock_irqsave(&qi->q_lock, flags);
591 while (qi->free_cnt < 3) { 581 while (qi->free_cnt < 3) {
592 spin_unlock(&qi->q_lock); 582 spin_unlock_irqrestore(&qi->q_lock, flags);
593 cpu_relax(); 583 cpu_relax();
594 spin_lock(&qi->q_lock); 584 spin_lock_irqsave(&qi->q_lock, flags);
595 } 585 }
596 586
597 index = qi->free_head; 587 index = qi->free_head;
@@ -612,15 +602,22 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
612 qi->free_head = (qi->free_head + 2) % QI_LENGTH; 602 qi->free_head = (qi->free_head + 2) % QI_LENGTH;
613 qi->free_cnt -= 2; 603 qi->free_cnt -= 2;
614 604
615 spin_lock_irqsave(&iommu->register_lock, flags); 605 spin_lock(&iommu->register_lock);
616 /* 606 /*
617 * update the HW tail register indicating the presence of 607 * update the HW tail register indicating the presence of
618 * new descriptors. 608 * new descriptors.
619 */ 609 */
620 writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG); 610 writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG);
621 spin_unlock_irqrestore(&iommu->register_lock, flags); 611 spin_unlock(&iommu->register_lock);
622 612
623 while (qi->desc_status[wait_index] != QI_DONE) { 613 while (qi->desc_status[wait_index] != QI_DONE) {
614 /*
615 * We will leave the interrupts disabled, to prevent interrupt
616 * context to queue another cmd while a cmd is already submitted
617 * and waiting for completion on this cpu. This is to avoid
618 * a deadlock where the interrupt context can wait indefinitely
619 * for free slots in the queue.
620 */
624 spin_unlock(&qi->q_lock); 621 spin_unlock(&qi->q_lock);
625 cpu_relax(); 622 cpu_relax();
626 spin_lock(&qi->q_lock); 623 spin_lock(&qi->q_lock);
@@ -629,7 +626,7 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
629 qi->desc_status[index] = QI_DONE; 626 qi->desc_status[index] = QI_DONE;
630 627
631 reclaim_free_desc(qi); 628 reclaim_free_desc(qi);
632 spin_unlock(&qi->q_lock); 629 spin_unlock_irqrestore(&qi->q_lock, flags);
633} 630}
634 631
635/* 632/*
@@ -645,6 +642,62 @@ void qi_global_iec(struct intel_iommu *iommu)
645 qi_submit_sync(&desc, iommu); 642 qi_submit_sync(&desc, iommu);
646} 643}
647 644
645int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
646 u64 type, int non_present_entry_flush)
647{
648
649 struct qi_desc desc;
650
651 if (non_present_entry_flush) {
652 if (!cap_caching_mode(iommu->cap))
653 return 1;
654 else
655 did = 0;
656 }
657
658 desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
659 | QI_CC_GRAN(type) | QI_CC_TYPE;
660 desc.high = 0;
661
662 qi_submit_sync(&desc, iommu);
663
664 return 0;
665
666}
667
668int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
669 unsigned int size_order, u64 type,
670 int non_present_entry_flush)
671{
672 u8 dw = 0, dr = 0;
673
674 struct qi_desc desc;
675 int ih = 0;
676
677 if (non_present_entry_flush) {
678 if (!cap_caching_mode(iommu->cap))
679 return 1;
680 else
681 did = 0;
682 }
683
684 if (cap_write_drain(iommu->cap))
685 dw = 1;
686
687 if (cap_read_drain(iommu->cap))
688 dr = 1;
689
690 desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
691 | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
692 desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
693 | QI_IOTLB_AM(size_order);
694
695 qi_submit_sync(&desc, iommu);
696
697 return 0;
698
699}
700
648/* 701/*
649 * Enable Queued Invalidation interface. This is a must to support 702 * Enable Queued Invalidation interface. This is a must to support
650 * interrupt-remapping. Also used by DMA-remapping, which replaces 703 * interrupt-remapping. Also used by DMA-remapping, which replaces
diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h
index 5a58b075dd8d..f9e244da30ae 100644
--- a/drivers/pci/hotplug/acpiphp.h
+++ b/drivers/pci/hotplug/acpiphp.h
@@ -50,9 +50,6 @@
50#define info(format, arg...) printk(KERN_INFO "%s: " format, MY_NAME , ## arg) 50#define info(format, arg...) printk(KERN_INFO "%s: " format, MY_NAME , ## arg)
51#define warn(format, arg...) printk(KERN_WARNING "%s: " format, MY_NAME , ## arg) 51#define warn(format, arg...) printk(KERN_WARNING "%s: " format, MY_NAME , ## arg)
52 52
53/* name size which is used for entries in pcihpfs */
54#define SLOT_NAME_SIZE 20 /* {_SUN} */
55
56struct acpiphp_bridge; 53struct acpiphp_bridge;
57struct acpiphp_slot; 54struct acpiphp_slot;
58 55
@@ -63,9 +60,13 @@ struct slot {
63 struct hotplug_slot *hotplug_slot; 60 struct hotplug_slot *hotplug_slot;
64 struct acpiphp_slot *acpi_slot; 61 struct acpiphp_slot *acpi_slot;
65 struct hotplug_slot_info info; 62 struct hotplug_slot_info info;
66 char name[SLOT_NAME_SIZE];
67}; 63};
68 64
65static inline const char *slot_name(struct slot *slot)
66{
67 return hotplug_slot_name(slot->hotplug_slot);
68}
69
69/* 70/*
70 * struct acpiphp_bridge - PCI bridge information 71 * struct acpiphp_bridge - PCI bridge information
71 * 72 *
diff --git a/drivers/pci/hotplug/acpiphp_core.c b/drivers/pci/hotplug/acpiphp_core.c
index 0e496e866a84..95b536a23d25 100644
--- a/drivers/pci/hotplug/acpiphp_core.c
+++ b/drivers/pci/hotplug/acpiphp_core.c
@@ -44,6 +44,9 @@
44 44
45#define MY_NAME "acpiphp" 45#define MY_NAME "acpiphp"
46 46
47/* name size which is used for entries in pcihpfs */
48#define SLOT_NAME_SIZE 21 /* {_SUN} */
49
47static int debug; 50static int debug;
48int acpiphp_debug; 51int acpiphp_debug;
49 52
@@ -84,7 +87,6 @@ static struct hotplug_slot_ops acpi_hotplug_slot_ops = {
84 .get_adapter_status = get_adapter_status, 87 .get_adapter_status = get_adapter_status,
85}; 88};
86 89
87
88/** 90/**
89 * acpiphp_register_attention - set attention LED callback 91 * acpiphp_register_attention - set attention LED callback
90 * @info: must be completely filled with LED callbacks 92 * @info: must be completely filled with LED callbacks
@@ -136,7 +138,7 @@ static int enable_slot(struct hotplug_slot *hotplug_slot)
136{ 138{
137 struct slot *slot = hotplug_slot->private; 139 struct slot *slot = hotplug_slot->private;
138 140
139 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 141 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
140 142
141 /* enable the specified slot */ 143 /* enable the specified slot */
142 return acpiphp_enable_slot(slot->acpi_slot); 144 return acpiphp_enable_slot(slot->acpi_slot);
@@ -154,7 +156,7 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
154 struct slot *slot = hotplug_slot->private; 156 struct slot *slot = hotplug_slot->private;
155 int retval; 157 int retval;
156 158
157 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 159 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
158 160
159 /* disable the specified slot */ 161 /* disable the specified slot */
160 retval = acpiphp_disable_slot(slot->acpi_slot); 162 retval = acpiphp_disable_slot(slot->acpi_slot);
@@ -177,7 +179,7 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
177 { 179 {
178 int retval = -ENODEV; 180 int retval = -ENODEV;
179 181
180 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 182 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot_name(hotplug_slot));
181 183
182 if (attention_info && try_module_get(attention_info->owner)) { 184 if (attention_info && try_module_get(attention_info->owner)) {
183 retval = attention_info->set_attn(hotplug_slot, status); 185 retval = attention_info->set_attn(hotplug_slot, status);
@@ -200,7 +202,7 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
200{ 202{
201 struct slot *slot = hotplug_slot->private; 203 struct slot *slot = hotplug_slot->private;
202 204
203 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 205 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
204 206
205 *value = acpiphp_get_power_status(slot->acpi_slot); 207 *value = acpiphp_get_power_status(slot->acpi_slot);
206 208
@@ -222,7 +224,7 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
222{ 224{
223 int retval = -EINVAL; 225 int retval = -EINVAL;
224 226
225 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 227 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot_name(hotplug_slot));
226 228
227 if (attention_info && try_module_get(attention_info->owner)) { 229 if (attention_info && try_module_get(attention_info->owner)) {
228 retval = attention_info->get_attn(hotplug_slot, value); 230 retval = attention_info->get_attn(hotplug_slot, value);
@@ -245,7 +247,7 @@ static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
245{ 247{
246 struct slot *slot = hotplug_slot->private; 248 struct slot *slot = hotplug_slot->private;
247 249
248 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 250 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
249 251
250 *value = acpiphp_get_latch_status(slot->acpi_slot); 252 *value = acpiphp_get_latch_status(slot->acpi_slot);
251 253
@@ -265,7 +267,7 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
265{ 267{
266 struct slot *slot = hotplug_slot->private; 268 struct slot *slot = hotplug_slot->private;
267 269
268 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 270 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
269 271
270 *value = acpiphp_get_adapter_status(slot->acpi_slot); 272 *value = acpiphp_get_adapter_status(slot->acpi_slot);
271 273
@@ -299,7 +301,7 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
299{ 301{
300 struct slot *slot = hotplug_slot->private; 302 struct slot *slot = hotplug_slot->private;
301 303
302 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 304 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
303 305
304 kfree(slot->hotplug_slot); 306 kfree(slot->hotplug_slot);
305 kfree(slot); 307 kfree(slot);
@@ -310,6 +312,7 @@ int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot)
310{ 312{
311 struct slot *slot; 313 struct slot *slot;
312 int retval = -ENOMEM; 314 int retval = -ENOMEM;
315 char name[SLOT_NAME_SIZE];
313 316
314 slot = kzalloc(sizeof(*slot), GFP_KERNEL); 317 slot = kzalloc(sizeof(*slot), GFP_KERNEL);
315 if (!slot) 318 if (!slot)
@@ -321,8 +324,6 @@ int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot)
321 324
322 slot->hotplug_slot->info = &slot->info; 325 slot->hotplug_slot->info = &slot->info;
323 326
324 slot->hotplug_slot->name = slot->name;
325
326 slot->hotplug_slot->private = slot; 327 slot->hotplug_slot->private = slot;
327 slot->hotplug_slot->release = &release_slot; 328 slot->hotplug_slot->release = &release_slot;
328 slot->hotplug_slot->ops = &acpi_hotplug_slot_ops; 329 slot->hotplug_slot->ops = &acpi_hotplug_slot_ops;
@@ -336,11 +337,12 @@ int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot)
336 slot->hotplug_slot->info->cur_bus_speed = PCI_SPEED_UNKNOWN; 337 slot->hotplug_slot->info->cur_bus_speed = PCI_SPEED_UNKNOWN;
337 338
338 acpiphp_slot->slot = slot; 339 acpiphp_slot->slot = slot;
339 snprintf(slot->name, sizeof(slot->name), "%u", slot->acpi_slot->sun); 340 snprintf(name, SLOT_NAME_SIZE, "%u", slot->acpi_slot->sun);
340 341
341 retval = pci_hp_register(slot->hotplug_slot, 342 retval = pci_hp_register(slot->hotplug_slot,
342 acpiphp_slot->bridge->pci_bus, 343 acpiphp_slot->bridge->pci_bus,
343 acpiphp_slot->device); 344 acpiphp_slot->device,
345 name);
344 if (retval == -EBUSY) 346 if (retval == -EBUSY)
345 goto error_hpslot; 347 goto error_hpslot;
346 if (retval) { 348 if (retval) {
@@ -348,7 +350,7 @@ int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot)
348 goto error_hpslot; 350 goto error_hpslot;
349 } 351 }
350 352
351 info("Slot [%s] registered\n", slot->hotplug_slot->name); 353 info("Slot [%s] registered\n", slot_name(slot));
352 354
353 return 0; 355 return 0;
354error_hpslot: 356error_hpslot:
@@ -365,7 +367,7 @@ void acpiphp_unregister_hotplug_slot(struct acpiphp_slot *acpiphp_slot)
365 struct slot *slot = acpiphp_slot->slot; 367 struct slot *slot = acpiphp_slot->slot;
366 int retval = 0; 368 int retval = 0;
367 369
368 info ("Slot [%s] unregistered\n", slot->hotplug_slot->name); 370 info("Slot [%s] unregistered\n", slot_name(slot));
369 371
370 retval = pci_hp_deregister(slot->hotplug_slot); 372 retval = pci_hp_deregister(slot->hotplug_slot);
371 if (retval) 373 if (retval)
diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
index d9769b30be9a..9fff878cf026 100644
--- a/drivers/pci/hotplug/cpci_hotplug.h
+++ b/drivers/pci/hotplug/cpci_hotplug.h
@@ -30,6 +30,7 @@
30 30
31#include <linux/types.h> 31#include <linux/types.h>
32#include <linux/pci.h> 32#include <linux/pci.h>
33#include <linux/pci_hotplug.h>
33 34
34/* PICMG 2.1 R2.0 HS CSR bits: */ 35/* PICMG 2.1 R2.0 HS CSR bits: */
35#define HS_CSR_INS 0x0080 36#define HS_CSR_INS 0x0080
@@ -69,6 +70,11 @@ struct cpci_hp_controller {
69 struct cpci_hp_controller_ops *ops; 70 struct cpci_hp_controller_ops *ops;
70}; 71};
71 72
73static inline const char *slot_name(struct slot *slot)
74{
75 return hotplug_slot_name(slot->hotplug_slot);
76}
77
72extern int cpci_hp_register_controller(struct cpci_hp_controller *controller); 78extern int cpci_hp_register_controller(struct cpci_hp_controller *controller);
73extern int cpci_hp_unregister_controller(struct cpci_hp_controller *controller); 79extern int cpci_hp_unregister_controller(struct cpci_hp_controller *controller);
74extern int cpci_hp_register_bus(struct pci_bus *bus, u8 first, u8 last); 80extern int cpci_hp_register_bus(struct pci_bus *bus, u8 first, u8 last);
diff --git a/drivers/pci/hotplug/cpci_hotplug_core.c b/drivers/pci/hotplug/cpci_hotplug_core.c
index 935947991dc9..de94f4feef8c 100644
--- a/drivers/pci/hotplug/cpci_hotplug_core.c
+++ b/drivers/pci/hotplug/cpci_hotplug_core.c
@@ -108,7 +108,7 @@ enable_slot(struct hotplug_slot *hotplug_slot)
108 struct slot *slot = hotplug_slot->private; 108 struct slot *slot = hotplug_slot->private;
109 int retval = 0; 109 int retval = 0;
110 110
111 dbg("%s - physical_slot = %s", __func__, hotplug_slot->name); 111 dbg("%s - physical_slot = %s", __func__, slot_name(slot));
112 112
113 if (controller->ops->set_power) 113 if (controller->ops->set_power)
114 retval = controller->ops->set_power(slot, 1); 114 retval = controller->ops->set_power(slot, 1);
@@ -121,25 +121,23 @@ disable_slot(struct hotplug_slot *hotplug_slot)
121 struct slot *slot = hotplug_slot->private; 121 struct slot *slot = hotplug_slot->private;
122 int retval = 0; 122 int retval = 0;
123 123
124 dbg("%s - physical_slot = %s", __func__, hotplug_slot->name); 124 dbg("%s - physical_slot = %s", __func__, slot_name(slot));
125 125
126 down_write(&list_rwsem); 126 down_write(&list_rwsem);
127 127
128 /* Unconfigure device */ 128 /* Unconfigure device */
129 dbg("%s - unconfiguring slot %s", 129 dbg("%s - unconfiguring slot %s", __func__, slot_name(slot));
130 __func__, slot->hotplug_slot->name);
131 if ((retval = cpci_unconfigure_slot(slot))) { 130 if ((retval = cpci_unconfigure_slot(slot))) {
132 err("%s - could not unconfigure slot %s", 131 err("%s - could not unconfigure slot %s",
133 __func__, slot->hotplug_slot->name); 132 __func__, slot_name(slot));
134 goto disable_error; 133 goto disable_error;
135 } 134 }
136 dbg("%s - finished unconfiguring slot %s", 135 dbg("%s - finished unconfiguring slot %s", __func__, slot_name(slot));
137 __func__, slot->hotplug_slot->name);
138 136
139 /* Clear EXT (by setting it) */ 137 /* Clear EXT (by setting it) */
140 if (cpci_clear_ext(slot)) { 138 if (cpci_clear_ext(slot)) {
141 err("%s - could not clear EXT for slot %s", 139 err("%s - could not clear EXT for slot %s",
142 __func__, slot->hotplug_slot->name); 140 __func__, slot_name(slot));
143 retval = -ENODEV; 141 retval = -ENODEV;
144 goto disable_error; 142 goto disable_error;
145 } 143 }
@@ -214,7 +212,6 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
214 struct slot *slot = hotplug_slot->private; 212 struct slot *slot = hotplug_slot->private;
215 213
216 kfree(slot->hotplug_slot->info); 214 kfree(slot->hotplug_slot->info);
217 kfree(slot->hotplug_slot->name);
218 kfree(slot->hotplug_slot); 215 kfree(slot->hotplug_slot);
219 if (slot->dev) 216 if (slot->dev)
220 pci_dev_put(slot->dev); 217 pci_dev_put(slot->dev);
@@ -222,12 +219,6 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
222} 219}
223 220
224#define SLOT_NAME_SIZE 6 221#define SLOT_NAME_SIZE 6
225static void
226make_slot_name(struct slot *slot)
227{
228 snprintf(slot->hotplug_slot->name,
229 SLOT_NAME_SIZE, "%02x:%02x", slot->bus->number, slot->number);
230}
231 222
232int 223int
233cpci_hp_register_bus(struct pci_bus *bus, u8 first, u8 last) 224cpci_hp_register_bus(struct pci_bus *bus, u8 first, u8 last)
@@ -235,7 +226,7 @@ cpci_hp_register_bus(struct pci_bus *bus, u8 first, u8 last)
235 struct slot *slot; 226 struct slot *slot;
236 struct hotplug_slot *hotplug_slot; 227 struct hotplug_slot *hotplug_slot;
237 struct hotplug_slot_info *info; 228 struct hotplug_slot_info *info;
238 char *name; 229 char name[SLOT_NAME_SIZE];
239 int status = -ENOMEM; 230 int status = -ENOMEM;
240 int i; 231 int i;
241 232
@@ -262,34 +253,31 @@ cpci_hp_register_bus(struct pci_bus *bus, u8 first, u8 last)
262 goto error_hpslot; 253 goto error_hpslot;
263 hotplug_slot->info = info; 254 hotplug_slot->info = info;
264 255
265 name = kmalloc(SLOT_NAME_SIZE, GFP_KERNEL);
266 if (!name)
267 goto error_info;
268 hotplug_slot->name = name;
269
270 slot->bus = bus; 256 slot->bus = bus;
271 slot->number = i; 257 slot->number = i;
272 slot->devfn = PCI_DEVFN(i, 0); 258 slot->devfn = PCI_DEVFN(i, 0);
273 259
260 snprintf(name, SLOT_NAME_SIZE, "%02x:%02x", bus->number, i);
261
274 hotplug_slot->private = slot; 262 hotplug_slot->private = slot;
275 hotplug_slot->release = &release_slot; 263 hotplug_slot->release = &release_slot;
276 make_slot_name(slot);
277 hotplug_slot->ops = &cpci_hotplug_slot_ops; 264 hotplug_slot->ops = &cpci_hotplug_slot_ops;
278 265
279 /* 266 /*
280 * Initialize the slot info structure with some known 267 * Initialize the slot info structure with some known
281 * good values. 268 * good values.
282 */ 269 */
283 dbg("initializing slot %s", slot->hotplug_slot->name); 270 dbg("initializing slot %s", name);
284 info->power_status = cpci_get_power_status(slot); 271 info->power_status = cpci_get_power_status(slot);
285 info->attention_status = cpci_get_attention_status(slot); 272 info->attention_status = cpci_get_attention_status(slot);
286 273
287 dbg("registering slot %s", slot->hotplug_slot->name); 274 dbg("registering slot %s", name);
288 status = pci_hp_register(slot->hotplug_slot, bus, i); 275 status = pci_hp_register(slot->hotplug_slot, bus, i, name);
289 if (status) { 276 if (status) {
290 err("pci_hp_register failed with error %d", status); 277 err("pci_hp_register failed with error %d", status);
291 goto error_name; 278 goto error_info;
292 } 279 }
280 dbg("slot registered with name: %s", slot_name(slot));
293 281
294 /* Add slot to our internal list */ 282 /* Add slot to our internal list */
295 down_write(&list_rwsem); 283 down_write(&list_rwsem);
@@ -298,8 +286,6 @@ cpci_hp_register_bus(struct pci_bus *bus, u8 first, u8 last)
298 up_write(&list_rwsem); 286 up_write(&list_rwsem);
299 } 287 }
300 return 0; 288 return 0;
301error_name:
302 kfree(name);
303error_info: 289error_info:
304 kfree(info); 290 kfree(info);
305error_hpslot: 291error_hpslot:
@@ -327,7 +313,7 @@ cpci_hp_unregister_bus(struct pci_bus *bus)
327 list_del(&slot->slot_list); 313 list_del(&slot->slot_list);
328 slots--; 314 slots--;
329 315
330 dbg("deregistering slot %s", slot->hotplug_slot->name); 316 dbg("deregistering slot %s", slot_name(slot));
331 status = pci_hp_deregister(slot->hotplug_slot); 317 status = pci_hp_deregister(slot->hotplug_slot);
332 if (status) { 318 if (status) {
333 err("pci_hp_deregister failed with error %d", 319 err("pci_hp_deregister failed with error %d",
@@ -379,11 +365,10 @@ init_slots(int clear_ins)
379 return -1; 365 return -1;
380 } 366 }
381 list_for_each_entry(slot, &slot_list, slot_list) { 367 list_for_each_entry(slot, &slot_list, slot_list) {
382 dbg("%s - looking at slot %s", 368 dbg("%s - looking at slot %s", __func__, slot_name(slot));
383 __func__, slot->hotplug_slot->name);
384 if (clear_ins && cpci_check_and_clear_ins(slot)) 369 if (clear_ins && cpci_check_and_clear_ins(slot))
385 dbg("%s - cleared INS for slot %s", 370 dbg("%s - cleared INS for slot %s",
386 __func__, slot->hotplug_slot->name); 371 __func__, slot_name(slot));
387 dev = pci_get_slot(slot->bus, PCI_DEVFN(slot->number, 0)); 372 dev = pci_get_slot(slot->bus, PCI_DEVFN(slot->number, 0));
388 if (dev) { 373 if (dev) {
389 if (update_adapter_status(slot->hotplug_slot, 1)) 374 if (update_adapter_status(slot->hotplug_slot, 1))
@@ -414,8 +399,7 @@ check_slots(void)
414 } 399 }
415 extracted = inserted = 0; 400 extracted = inserted = 0;
416 list_for_each_entry(slot, &slot_list, slot_list) { 401 list_for_each_entry(slot, &slot_list, slot_list) {
417 dbg("%s - looking at slot %s", 402 dbg("%s - looking at slot %s", __func__, slot_name(slot));
418 __func__, slot->hotplug_slot->name);
419 if (cpci_check_and_clear_ins(slot)) { 403 if (cpci_check_and_clear_ins(slot)) {
420 /* 404 /*
421 * Some broken hardware (e.g. PLX 9054AB) asserts 405 * Some broken hardware (e.g. PLX 9054AB) asserts
@@ -423,35 +407,34 @@ check_slots(void)
423 */ 407 */
424 if (slot->dev) { 408 if (slot->dev) {
425 warn("slot %s already inserted", 409 warn("slot %s already inserted",
426 slot->hotplug_slot->name); 410 slot_name(slot));
427 inserted++; 411 inserted++;
428 continue; 412 continue;
429 } 413 }
430 414
431 /* Process insertion */ 415 /* Process insertion */
432 dbg("%s - slot %s inserted", 416 dbg("%s - slot %s inserted", __func__, slot_name(slot));
433 __func__, slot->hotplug_slot->name);
434 417
435 /* GSM, debug */ 418 /* GSM, debug */
436 hs_csr = cpci_get_hs_csr(slot); 419 hs_csr = cpci_get_hs_csr(slot);
437 dbg("%s - slot %s HS_CSR (1) = %04x", 420 dbg("%s - slot %s HS_CSR (1) = %04x",
438 __func__, slot->hotplug_slot->name, hs_csr); 421 __func__, slot_name(slot), hs_csr);
439 422
440 /* Configure device */ 423 /* Configure device */
441 dbg("%s - configuring slot %s", 424 dbg("%s - configuring slot %s",
442 __func__, slot->hotplug_slot->name); 425 __func__, slot_name(slot));
443 if (cpci_configure_slot(slot)) { 426 if (cpci_configure_slot(slot)) {
444 err("%s - could not configure slot %s", 427 err("%s - could not configure slot %s",
445 __func__, slot->hotplug_slot->name); 428 __func__, slot_name(slot));
446 continue; 429 continue;
447 } 430 }
448 dbg("%s - finished configuring slot %s", 431 dbg("%s - finished configuring slot %s",
449 __func__, slot->hotplug_slot->name); 432 __func__, slot_name(slot));
450 433
451 /* GSM, debug */ 434 /* GSM, debug */
452 hs_csr = cpci_get_hs_csr(slot); 435 hs_csr = cpci_get_hs_csr(slot);
453 dbg("%s - slot %s HS_CSR (2) = %04x", 436 dbg("%s - slot %s HS_CSR (2) = %04x",
454 __func__, slot->hotplug_slot->name, hs_csr); 437 __func__, slot_name(slot), hs_csr);
455 438
456 if (update_latch_status(slot->hotplug_slot, 1)) 439 if (update_latch_status(slot->hotplug_slot, 1))
457 warn("failure to update latch file"); 440 warn("failure to update latch file");
@@ -464,18 +447,18 @@ check_slots(void)
464 /* GSM, debug */ 447 /* GSM, debug */
465 hs_csr = cpci_get_hs_csr(slot); 448 hs_csr = cpci_get_hs_csr(slot);
466 dbg("%s - slot %s HS_CSR (3) = %04x", 449 dbg("%s - slot %s HS_CSR (3) = %04x",
467 __func__, slot->hotplug_slot->name, hs_csr); 450 __func__, slot_name(slot), hs_csr);
468 451
469 inserted++; 452 inserted++;
470 } else if (cpci_check_ext(slot)) { 453 } else if (cpci_check_ext(slot)) {
471 /* Process extraction request */ 454 /* Process extraction request */
472 dbg("%s - slot %s extracted", 455 dbg("%s - slot %s extracted",
473 __func__, slot->hotplug_slot->name); 456 __func__, slot_name(slot));
474 457
475 /* GSM, debug */ 458 /* GSM, debug */
476 hs_csr = cpci_get_hs_csr(slot); 459 hs_csr = cpci_get_hs_csr(slot);
477 dbg("%s - slot %s HS_CSR = %04x", 460 dbg("%s - slot %s HS_CSR = %04x",
478 __func__, slot->hotplug_slot->name, hs_csr); 461 __func__, slot_name(slot), hs_csr);
479 462
480 if (!slot->extracting) { 463 if (!slot->extracting) {
481 if (update_latch_status(slot->hotplug_slot, 0)) { 464 if (update_latch_status(slot->hotplug_slot, 0)) {
@@ -493,7 +476,7 @@ check_slots(void)
493 * bother trying to tell the driver or not? 476 * bother trying to tell the driver or not?
494 */ 477 */
495 err("card in slot %s was improperly removed", 478 err("card in slot %s was improperly removed",
496 slot->hotplug_slot->name); 479 slot_name(slot));
497 if (update_adapter_status(slot->hotplug_slot, 0)) 480 if (update_adapter_status(slot->hotplug_slot, 0))
498 warn("failure to update adapter file"); 481 warn("failure to update adapter file");
499 slot->extracting = 0; 482 slot->extracting = 0;
diff --git a/drivers/pci/hotplug/cpci_hotplug_pci.c b/drivers/pci/hotplug/cpci_hotplug_pci.c
index df82b95e2874..829c327cfb5e 100644
--- a/drivers/pci/hotplug/cpci_hotplug_pci.c
+++ b/drivers/pci/hotplug/cpci_hotplug_pci.c
@@ -209,7 +209,7 @@ int cpci_led_on(struct slot* slot)
209 hs_cap + 2, 209 hs_cap + 2,
210 hs_csr)) { 210 hs_csr)) {
211 err("Could not set LOO for slot %s", 211 err("Could not set LOO for slot %s",
212 slot->hotplug_slot->name); 212 hotplug_slot_name(slot->hotplug_slot));
213 return -ENODEV; 213 return -ENODEV;
214 } 214 }
215 } 215 }
@@ -238,7 +238,7 @@ int cpci_led_off(struct slot* slot)
238 hs_cap + 2, 238 hs_cap + 2,
239 hs_csr)) { 239 hs_csr)) {
240 err("Could not clear LOO for slot %s", 240 err("Could not clear LOO for slot %s",
241 slot->hotplug_slot->name); 241 hotplug_slot_name(slot->hotplug_slot));
242 return -ENODEV; 242 return -ENODEV;
243 } 243 }
244 } 244 }
diff --git a/drivers/pci/hotplug/cpqphp.h b/drivers/pci/hotplug/cpqphp.h
index b1decfa88b7a..afaf8f69f73e 100644
--- a/drivers/pci/hotplug/cpqphp.h
+++ b/drivers/pci/hotplug/cpqphp.h
@@ -449,6 +449,11 @@ extern u8 cpqhp_disk_irq;
449 449
450/* inline functions */ 450/* inline functions */
451 451
452static inline char *slot_name(struct slot *slot)
453{
454 return hotplug_slot_name(slot->hotplug_slot);
455}
456
452/* 457/*
453 * return_resource 458 * return_resource
454 * 459 *
@@ -696,14 +701,6 @@ static inline int get_presence_status(struct controller *ctrl, struct slot *slot
696 return presence_save; 701 return presence_save;
697} 702}
698 703
699#define SLOT_NAME_SIZE 10
700
701static inline void make_slot_name(char *buffer, int buffer_size, struct slot *slot)
702{
703 snprintf(buffer, buffer_size, "%d", slot->number);
704}
705
706
707static inline int wait_for_ctrl_irq(struct controller *ctrl) 704static inline int wait_for_ctrl_irq(struct controller *ctrl)
708{ 705{
709 DECLARE_WAITQUEUE(wait, current); 706 DECLARE_WAITQUEUE(wait, current);
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c
index 54defec51d08..724d42c4adbc 100644
--- a/drivers/pci/hotplug/cpqphp_core.c
+++ b/drivers/pci/hotplug/cpqphp_core.c
@@ -315,14 +315,15 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
315{ 315{
316 struct slot *slot = hotplug_slot->private; 316 struct slot *slot = hotplug_slot->private;
317 317
318 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 318 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
319 319
320 kfree(slot->hotplug_slot->info); 320 kfree(slot->hotplug_slot->info);
321 kfree(slot->hotplug_slot->name);
322 kfree(slot->hotplug_slot); 321 kfree(slot->hotplug_slot);
323 kfree(slot); 322 kfree(slot);
324} 323}
325 324
325#define SLOT_NAME_SIZE 10
326
326static int ctrl_slot_setup(struct controller *ctrl, 327static int ctrl_slot_setup(struct controller *ctrl,
327 void __iomem *smbios_start, 328 void __iomem *smbios_start,
328 void __iomem *smbios_table) 329 void __iomem *smbios_table)
@@ -335,6 +336,7 @@ static int ctrl_slot_setup(struct controller *ctrl,
335 u8 slot_number; 336 u8 slot_number;
336 u8 ctrl_slot; 337 u8 ctrl_slot;
337 u32 tempdword; 338 u32 tempdword;
339 char name[SLOT_NAME_SIZE];
338 void __iomem *slot_entry= NULL; 340 void __iomem *slot_entry= NULL;
339 int result = -ENOMEM; 341 int result = -ENOMEM;
340 342
@@ -363,16 +365,12 @@ static int ctrl_slot_setup(struct controller *ctrl,
363 if (!hotplug_slot->info) 365 if (!hotplug_slot->info)
364 goto error_hpslot; 366 goto error_hpslot;
365 hotplug_slot_info = hotplug_slot->info; 367 hotplug_slot_info = hotplug_slot->info;
366 hotplug_slot->name = kmalloc(SLOT_NAME_SIZE, GFP_KERNEL);
367
368 if (!hotplug_slot->name)
369 goto error_info;
370 368
371 slot->ctrl = ctrl; 369 slot->ctrl = ctrl;
372 slot->bus = ctrl->bus; 370 slot->bus = ctrl->bus;
373 slot->device = slot_device; 371 slot->device = slot_device;
374 slot->number = slot_number; 372 slot->number = slot_number;
375 dbg("slot->number = %d\n", slot->number); 373 dbg("slot->number = %u\n", slot->number);
376 374
377 slot_entry = get_SMBIOS_entry(smbios_start, smbios_table, 9, 375 slot_entry = get_SMBIOS_entry(smbios_start, smbios_table, 9,
378 slot_entry); 376 slot_entry);
@@ -418,9 +416,9 @@ static int ctrl_slot_setup(struct controller *ctrl,
418 /* register this slot with the hotplug pci core */ 416 /* register this slot with the hotplug pci core */
419 hotplug_slot->release = &release_slot; 417 hotplug_slot->release = &release_slot;
420 hotplug_slot->private = slot; 418 hotplug_slot->private = slot;
421 make_slot_name(hotplug_slot->name, SLOT_NAME_SIZE, slot); 419 snprintf(name, SLOT_NAME_SIZE, "%u", slot->number);
422 hotplug_slot->ops = &cpqphp_hotplug_slot_ops; 420 hotplug_slot->ops = &cpqphp_hotplug_slot_ops;
423 421
424 hotplug_slot_info->power_status = get_slot_enabled(ctrl, slot); 422 hotplug_slot_info->power_status = get_slot_enabled(ctrl, slot);
425 hotplug_slot_info->attention_status = 423 hotplug_slot_info->attention_status =
426 cpq_get_attention_status(ctrl, slot); 424 cpq_get_attention_status(ctrl, slot);
@@ -436,10 +434,11 @@ static int ctrl_slot_setup(struct controller *ctrl,
436 slot_number); 434 slot_number);
437 result = pci_hp_register(hotplug_slot, 435 result = pci_hp_register(hotplug_slot,
438 ctrl->pci_dev->subordinate, 436 ctrl->pci_dev->subordinate,
439 slot->device); 437 slot->device,
438 name);
440 if (result) { 439 if (result) {
441 err("pci_hp_register failed with error %d\n", result); 440 err("pci_hp_register failed with error %d\n", result);
442 goto error_name; 441 goto error_info;
443 } 442 }
444 443
445 slot->next = ctrl->slot; 444 slot->next = ctrl->slot;
@@ -451,8 +450,6 @@ static int ctrl_slot_setup(struct controller *ctrl,
451 } 450 }
452 451
453 return 0; 452 return 0;
454error_name:
455 kfree(hotplug_slot->name);
456error_info: 453error_info:
457 kfree(hotplug_slot_info); 454 kfree(hotplug_slot_info);
458error_hpslot: 455error_hpslot:
@@ -638,7 +635,7 @@ static int set_attention_status (struct hotplug_slot *hotplug_slot, u8 status)
638 u8 device; 635 u8 device;
639 u8 function; 636 u8 function;
640 637
641 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 638 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
642 639
643 if (cpqhp_get_bus_dev(ctrl, &bus, &devfn, slot->number) == -1) 640 if (cpqhp_get_bus_dev(ctrl, &bus, &devfn, slot->number) == -1)
644 return -ENODEV; 641 return -ENODEV;
@@ -665,7 +662,7 @@ static int process_SI(struct hotplug_slot *hotplug_slot)
665 u8 device; 662 u8 device;
666 u8 function; 663 u8 function;
667 664
668 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 665 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
669 666
670 if (cpqhp_get_bus_dev(ctrl, &bus, &devfn, slot->number) == -1) 667 if (cpqhp_get_bus_dev(ctrl, &bus, &devfn, slot->number) == -1)
671 return -ENODEV; 668 return -ENODEV;
@@ -697,7 +694,7 @@ static int process_SS(struct hotplug_slot *hotplug_slot)
697 u8 device; 694 u8 device;
698 u8 function; 695 u8 function;
699 696
700 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 697 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
701 698
702 if (cpqhp_get_bus_dev(ctrl, &bus, &devfn, slot->number) == -1) 699 if (cpqhp_get_bus_dev(ctrl, &bus, &devfn, slot->number) == -1)
703 return -ENODEV; 700 return -ENODEV;
@@ -720,7 +717,7 @@ static int hardware_test(struct hotplug_slot *hotplug_slot, u32 value)
720 struct slot *slot = hotplug_slot->private; 717 struct slot *slot = hotplug_slot->private;
721 struct controller *ctrl = slot->ctrl; 718 struct controller *ctrl = slot->ctrl;
722 719
723 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 720 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
724 721
725 return cpqhp_hardware_test(ctrl, value); 722 return cpqhp_hardware_test(ctrl, value);
726} 723}
@@ -731,7 +728,7 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
731 struct slot *slot = hotplug_slot->private; 728 struct slot *slot = hotplug_slot->private;
732 struct controller *ctrl = slot->ctrl; 729 struct controller *ctrl = slot->ctrl;
733 730
734 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 731 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
735 732
736 *value = get_slot_enabled(ctrl, slot); 733 *value = get_slot_enabled(ctrl, slot);
737 return 0; 734 return 0;
@@ -742,7 +739,7 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
742 struct slot *slot = hotplug_slot->private; 739 struct slot *slot = hotplug_slot->private;
743 struct controller *ctrl = slot->ctrl; 740 struct controller *ctrl = slot->ctrl;
744 741
745 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 742 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
746 743
747 *value = cpq_get_attention_status(ctrl, slot); 744 *value = cpq_get_attention_status(ctrl, slot);
748 return 0; 745 return 0;
@@ -753,7 +750,7 @@ static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
753 struct slot *slot = hotplug_slot->private; 750 struct slot *slot = hotplug_slot->private;
754 struct controller *ctrl = slot->ctrl; 751 struct controller *ctrl = slot->ctrl;
755 752
756 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 753 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
757 754
758 *value = cpq_get_latch_status(ctrl, slot); 755 *value = cpq_get_latch_status(ctrl, slot);
759 756
@@ -765,7 +762,7 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
765 struct slot *slot = hotplug_slot->private; 762 struct slot *slot = hotplug_slot->private;
766 struct controller *ctrl = slot->ctrl; 763 struct controller *ctrl = slot->ctrl;
767 764
768 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 765 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
769 766
770 *value = get_presence_status(ctrl, slot); 767 *value = get_presence_status(ctrl, slot);
771 768
@@ -777,7 +774,7 @@ static int get_max_bus_speed (struct hotplug_slot *hotplug_slot, enum pci_bus_sp
777 struct slot *slot = hotplug_slot->private; 774 struct slot *slot = hotplug_slot->private;
778 struct controller *ctrl = slot->ctrl; 775 struct controller *ctrl = slot->ctrl;
779 776
780 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 777 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
781 778
782 *value = ctrl->speed_capability; 779 *value = ctrl->speed_capability;
783 780
@@ -789,7 +786,7 @@ static int get_cur_bus_speed (struct hotplug_slot *hotplug_slot, enum pci_bus_sp
789 struct slot *slot = hotplug_slot->private; 786 struct slot *slot = hotplug_slot->private;
790 struct controller *ctrl = slot->ctrl; 787 struct controller *ctrl = slot->ctrl;
791 788
792 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 789 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
793 790
794 *value = ctrl->speed; 791 *value = ctrl->speed;
795 792
diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c b/drivers/pci/hotplug/cpqphp_ctrl.c
index ef041ca91c27..a60a25290995 100644
--- a/drivers/pci/hotplug/cpqphp_ctrl.c
+++ b/drivers/pci/hotplug/cpqphp_ctrl.c
@@ -1139,7 +1139,7 @@ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_
1139 for(slot = ctrl->slot; slot; slot = slot->next) { 1139 for(slot = ctrl->slot; slot; slot = slot->next) {
1140 if (slot->device == (hp_slot + ctrl->slot_device_offset)) 1140 if (slot->device == (hp_slot + ctrl->slot_device_offset))
1141 continue; 1141 continue;
1142 if (!slot->hotplug_slot && !slot->hotplug_slot->info) 1142 if (!slot->hotplug_slot || !slot->hotplug_slot->info)
1143 continue; 1143 continue;
1144 if (slot->hotplug_slot->info->adapter_status == 0) 1144 if (slot->hotplug_slot->info->adapter_status == 0)
1145 continue; 1145 continue;
diff --git a/drivers/pci/hotplug/fakephp.c b/drivers/pci/hotplug/fakephp.c
index 146ca9cd1567..3a2637a00934 100644
--- a/drivers/pci/hotplug/fakephp.c
+++ b/drivers/pci/hotplug/fakephp.c
@@ -66,10 +66,10 @@ struct dummy_slot {
66 struct pci_dev *dev; 66 struct pci_dev *dev;
67 struct work_struct remove_work; 67 struct work_struct remove_work;
68 unsigned long removed; 68 unsigned long removed;
69 char name[8];
70}; 69};
71 70
72static int debug; 71static int debug;
72static int dup_slots;
73static LIST_HEAD(slot_list); 73static LIST_HEAD(slot_list);
74static struct workqueue_struct *dummyphp_wq; 74static struct workqueue_struct *dummyphp_wq;
75 75
@@ -96,10 +96,13 @@ static void dummy_release(struct hotplug_slot *slot)
96 kfree(dslot); 96 kfree(dslot);
97} 97}
98 98
99#define SLOT_NAME_SIZE 8
100
99static int add_slot(struct pci_dev *dev) 101static int add_slot(struct pci_dev *dev)
100{ 102{
101 struct dummy_slot *dslot; 103 struct dummy_slot *dslot;
102 struct hotplug_slot *slot; 104 struct hotplug_slot *slot;
105 char name[SLOT_NAME_SIZE];
103 int retval = -ENOMEM; 106 int retval = -ENOMEM;
104 static int count = 1; 107 static int count = 1;
105 108
@@ -119,19 +122,22 @@ static int add_slot(struct pci_dev *dev)
119 if (!dslot) 122 if (!dslot)
120 goto error_info; 123 goto error_info;
121 124
122 slot->name = dslot->name; 125 if (dup_slots)
123 snprintf(slot->name, sizeof(dslot->name), "fake%d", count++); 126 snprintf(name, SLOT_NAME_SIZE, "fake");
124 dbg("slot->name = %s\n", slot->name); 127 else
128 snprintf(name, SLOT_NAME_SIZE, "fake%d", count++);
129 dbg("slot->name = %s\n", name);
125 slot->ops = &dummy_hotplug_slot_ops; 130 slot->ops = &dummy_hotplug_slot_ops;
126 slot->release = &dummy_release; 131 slot->release = &dummy_release;
127 slot->private = dslot; 132 slot->private = dslot;
128 133
129 retval = pci_hp_register(slot, dev->bus, PCI_SLOT(dev->devfn)); 134 retval = pci_hp_register(slot, dev->bus, PCI_SLOT(dev->devfn), name);
130 if (retval) { 135 if (retval) {
131 err("pci_hp_register failed with error %d\n", retval); 136 err("pci_hp_register failed with error %d\n", retval);
132 goto error_dslot; 137 goto error_dslot;
133 } 138 }
134 139
140 dbg("slot->name = %s\n", hotplug_slot_name(slot));
135 dslot->slot = slot; 141 dslot->slot = slot;
136 dslot->dev = pci_dev_get(dev); 142 dslot->dev = pci_dev_get(dev);
137 list_add (&dslot->node, &slot_list); 143 list_add (&dslot->node, &slot_list);
@@ -167,10 +173,11 @@ static void remove_slot(struct dummy_slot *dslot)
167{ 173{
168 int retval; 174 int retval;
169 175
170 dbg("removing slot %s\n", dslot->slot->name); 176 dbg("removing slot %s\n", hotplug_slot_name(dslot->slot));
171 retval = pci_hp_deregister(dslot->slot); 177 retval = pci_hp_deregister(dslot->slot);
172 if (retval) 178 if (retval)
173 err("Problem unregistering a slot %s\n", dslot->slot->name); 179 err("Problem unregistering a slot %s\n",
180 hotplug_slot_name(dslot->slot));
174} 181}
175 182
176/* called from the single-threaded workqueue handler to remove a slot */ 183/* called from the single-threaded workqueue handler to remove a slot */
@@ -308,7 +315,7 @@ static int disable_slot(struct hotplug_slot *slot)
308 return -ENODEV; 315 return -ENODEV;
309 dslot = slot->private; 316 dslot = slot->private;
310 317
311 dbg("%s - physical_slot = %s\n", __func__, slot->name); 318 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot_name(slot));
312 319
313 for (func = 7; func >= 0; func--) { 320 for (func = 7; func >= 0; func--) {
314 dev = pci_get_slot(dslot->dev->bus, dslot->dev->devfn + func); 321 dev = pci_get_slot(dslot->dev->bus, dslot->dev->devfn + func);
@@ -373,4 +380,5 @@ MODULE_DESCRIPTION(DRIVER_DESC);
373MODULE_LICENSE("GPL"); 380MODULE_LICENSE("GPL");
374module_param(debug, bool, S_IRUGO | S_IWUSR); 381module_param(debug, bool, S_IRUGO | S_IWUSR);
375MODULE_PARM_DESC(debug, "Debugging mode enabled or not"); 382MODULE_PARM_DESC(debug, "Debugging mode enabled or not");
376 383module_param(dup_slots, bool, S_IRUGO | S_IWUSR);
384MODULE_PARM_DESC(dup_slots, "Force duplicate slot names for debugging");
diff --git a/drivers/pci/hotplug/ibmphp.h b/drivers/pci/hotplug/ibmphp.h
index 612d96301509..a8d391a4957d 100644
--- a/drivers/pci/hotplug/ibmphp.h
+++ b/drivers/pci/hotplug/ibmphp.h
@@ -707,17 +707,16 @@ struct slot {
707 u8 device; 707 u8 device;
708 u8 number; 708 u8 number;
709 u8 real_physical_slot_num; 709 u8 real_physical_slot_num;
710 char name[100];
711 u32 capabilities; 710 u32 capabilities;
712 u8 supported_speed; 711 u8 supported_speed;
713 u8 supported_bus_mode; 712 u8 supported_bus_mode;
713 u8 flag; /* this is for disable slot and polling */
714 u8 ctlr_index;
714 struct hotplug_slot *hotplug_slot; 715 struct hotplug_slot *hotplug_slot;
715 struct controller *ctrl; 716 struct controller *ctrl;
716 struct pci_func *func; 717 struct pci_func *func;
717 u8 irq[4]; 718 u8 irq[4];
718 u8 flag; /* this is for disable slot and polling */
719 int bit_mode; /* 0 = 32, 1 = 64 */ 719 int bit_mode; /* 0 = 32, 1 = 64 */
720 u8 ctlr_index;
721 struct bus_info *bus_on; 720 struct bus_info *bus_on;
722 struct list_head ibm_slot_list; 721 struct list_head ibm_slot_list;
723 u8 status; 722 u8 status;
diff --git a/drivers/pci/hotplug/ibmphp_ebda.c b/drivers/pci/hotplug/ibmphp_ebda.c
index 8cfd1c4926c8..c1abac8ab5c3 100644
--- a/drivers/pci/hotplug/ibmphp_ebda.c
+++ b/drivers/pci/hotplug/ibmphp_ebda.c
@@ -587,11 +587,14 @@ static u8 calculate_first_slot (u8 slot_num)
587 return first_slot + 1; 587 return first_slot + 1;
588 588
589} 589}
590
591#define SLOT_NAME_SIZE 30
592
590static char *create_file_name (struct slot * slot_cur) 593static char *create_file_name (struct slot * slot_cur)
591{ 594{
592 struct opt_rio *opt_vg_ptr = NULL; 595 struct opt_rio *opt_vg_ptr = NULL;
593 struct opt_rio_lo *opt_lo_ptr = NULL; 596 struct opt_rio_lo *opt_lo_ptr = NULL;
594 static char str[30]; 597 static char str[SLOT_NAME_SIZE];
595 int which = 0; /* rxe = 1, chassis = 0 */ 598 int which = 0; /* rxe = 1, chassis = 0 */
596 u8 number = 1; /* either chassis or rxe # */ 599 u8 number = 1; /* either chassis or rxe # */
597 u8 first_slot = 1; 600 u8 first_slot = 1;
@@ -703,7 +706,6 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
703 706
704 slot = hotplug_slot->private; 707 slot = hotplug_slot->private;
705 kfree(slot->hotplug_slot->info); 708 kfree(slot->hotplug_slot->info);
706 kfree(slot->hotplug_slot->name);
707 kfree(slot->hotplug_slot); 709 kfree(slot->hotplug_slot);
708 slot->ctrl = NULL; 710 slot->ctrl = NULL;
709 slot->bus_on = NULL; 711 slot->bus_on = NULL;
@@ -734,6 +736,7 @@ static int __init ebda_rsrc_controller (void)
734 struct bus_info *bus_info_ptr1, *bus_info_ptr2; 736 struct bus_info *bus_info_ptr1, *bus_info_ptr2;
735 int rc; 737 int rc;
736 struct slot *tmp_slot; 738 struct slot *tmp_slot;
739 char name[SLOT_NAME_SIZE];
737 740
738 addr = hpc_list_ptr->phys_addr; 741 addr = hpc_list_ptr->phys_addr;
739 for (ctlr = 0; ctlr < hpc_list_ptr->num_ctlrs; ctlr++) { 742 for (ctlr = 0; ctlr < hpc_list_ptr->num_ctlrs; ctlr++) {
@@ -897,12 +900,6 @@ static int __init ebda_rsrc_controller (void)
897 goto error_no_hp_info; 900 goto error_no_hp_info;
898 } 901 }
899 902
900 hp_slot_ptr->name = kmalloc(30, GFP_KERNEL);
901 if (!hp_slot_ptr->name) {
902 rc = -ENOMEM;
903 goto error_no_hp_name;
904 }
905
906 tmp_slot = kzalloc(sizeof(*tmp_slot), GFP_KERNEL); 903 tmp_slot = kzalloc(sizeof(*tmp_slot), GFP_KERNEL);
907 if (!tmp_slot) { 904 if (!tmp_slot) {
908 rc = -ENOMEM; 905 rc = -ENOMEM;
@@ -964,9 +961,9 @@ static int __init ebda_rsrc_controller (void)
964 } /* each hpc */ 961 } /* each hpc */
965 962
966 list_for_each_entry(tmp_slot, &ibmphp_slot_head, ibm_slot_list) { 963 list_for_each_entry(tmp_slot, &ibmphp_slot_head, ibm_slot_list) {
967 snprintf (tmp_slot->hotplug_slot->name, 30, "%s", create_file_name (tmp_slot)); 964 snprintf(name, SLOT_NAME_SIZE, "%s", create_file_name(tmp_slot));
968 pci_hp_register(tmp_slot->hotplug_slot, 965 pci_hp_register(tmp_slot->hotplug_slot,
969 pci_find_bus(0, tmp_slot->bus), tmp_slot->device); 966 pci_find_bus(0, tmp_slot->bus), tmp_slot->device, name);
970 } 967 }
971 968
972 print_ebda_hpc (); 969 print_ebda_hpc ();
@@ -976,8 +973,6 @@ static int __init ebda_rsrc_controller (void)
976error: 973error:
977 kfree (hp_slot_ptr->private); 974 kfree (hp_slot_ptr->private);
978error_no_slot: 975error_no_slot:
979 kfree (hp_slot_ptr->name);
980error_no_hp_name:
981 kfree (hp_slot_ptr->info); 976 kfree (hp_slot_ptr->info);
982error_no_hp_info: 977error_no_hp_info:
983 kfree (hp_slot_ptr); 978 kfree (hp_slot_ptr);
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
index 2e6c4474644e..535fce0f07f9 100644
--- a/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/drivers/pci/hotplug/pci_hotplug_core.c
@@ -37,6 +37,7 @@
37#include <linux/init.h> 37#include <linux/init.h>
38#include <linux/mount.h> 38#include <linux/mount.h>
39#include <linux/namei.h> 39#include <linux/namei.h>
40#include <linux/mutex.h>
40#include <linux/pci.h> 41#include <linux/pci.h>
41#include <linux/pci_hotplug.h> 42#include <linux/pci_hotplug.h>
42#include <asm/uaccess.h> 43#include <asm/uaccess.h>
@@ -61,7 +62,7 @@ static int debug;
61////////////////////////////////////////////////////////////////// 62//////////////////////////////////////////////////////////////////
62 63
63static LIST_HEAD(pci_hotplug_slot_list); 64static LIST_HEAD(pci_hotplug_slot_list);
64static DEFINE_SPINLOCK(pci_hotplug_slot_list_lock); 65static DEFINE_MUTEX(pci_hp_mutex);
65 66
66/* these strings match up with the values in pci_bus_speed */ 67/* these strings match up with the values in pci_bus_speed */
67static char *pci_bus_speed_strings[] = { 68static char *pci_bus_speed_strings[] = {
@@ -530,16 +531,12 @@ static struct hotplug_slot *get_slot_from_name (const char *name)
530 struct hotplug_slot *slot; 531 struct hotplug_slot *slot;
531 struct list_head *tmp; 532 struct list_head *tmp;
532 533
533 spin_lock(&pci_hotplug_slot_list_lock);
534 list_for_each (tmp, &pci_hotplug_slot_list) { 534 list_for_each (tmp, &pci_hotplug_slot_list) {
535 slot = list_entry (tmp, struct hotplug_slot, slot_list); 535 slot = list_entry (tmp, struct hotplug_slot, slot_list);
536 if (strcmp(slot->name, name) == 0) 536 if (strcmp(hotplug_slot_name(slot), name) == 0)
537 goto out; 537 return slot;
538 } 538 }
539 slot = NULL; 539 return NULL;
540out:
541 spin_unlock(&pci_hotplug_slot_list_lock);
542 return slot;
543} 540}
544 541
545/** 542/**
@@ -547,13 +544,15 @@ out:
547 * @bus: bus this slot is on 544 * @bus: bus this slot is on
548 * @slot: pointer to the &struct hotplug_slot to register 545 * @slot: pointer to the &struct hotplug_slot to register
549 * @slot_nr: slot number 546 * @slot_nr: slot number
547 * @name: name registered with kobject core
550 * 548 *
551 * Registers a hotplug slot with the pci hotplug subsystem, which will allow 549 * Registers a hotplug slot with the pci hotplug subsystem, which will allow
552 * userspace interaction to the slot. 550 * userspace interaction to the slot.
553 * 551 *
554 * Returns 0 if successful, anything else for an error. 552 * Returns 0 if successful, anything else for an error.
555 */ 553 */
556int pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus, int slot_nr) 554int pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus, int slot_nr,
555 const char *name)
557{ 556{
558 int result; 557 int result;
559 struct pci_slot *pci_slot; 558 struct pci_slot *pci_slot;
@@ -568,48 +567,29 @@ int pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus, int slot_nr)
568 return -EINVAL; 567 return -EINVAL;
569 } 568 }
570 569
571 /* Check if we have already registered a slot with the same name. */ 570 mutex_lock(&pci_hp_mutex);
572 if (get_slot_from_name(slot->name))
573 return -EEXIST;
574 571
575 /* 572 /*
576 * No problems if we call this interface from both ACPI_PCI_SLOT 573 * No problems if we call this interface from both ACPI_PCI_SLOT
577 * driver and call it here again. If we've already created the 574 * driver and call it here again. If we've already created the
578 * pci_slot, the interface will simply bump the refcount. 575 * pci_slot, the interface will simply bump the refcount.
579 */ 576 */
580 pci_slot = pci_create_slot(bus, slot_nr, slot->name); 577 pci_slot = pci_create_slot(bus, slot_nr, name, slot);
581 if (IS_ERR(pci_slot)) 578 if (IS_ERR(pci_slot)) {
582 return PTR_ERR(pci_slot); 579 result = PTR_ERR(pci_slot);
583 580 goto out;
584 if (pci_slot->hotplug) {
585 dbg("%s: already claimed\n", __func__);
586 pci_destroy_slot(pci_slot);
587 return -EBUSY;
588 } 581 }
589 582
590 slot->pci_slot = pci_slot; 583 slot->pci_slot = pci_slot;
591 pci_slot->hotplug = slot; 584 pci_slot->hotplug = slot;
592 585
593 /*
594 * Allow pcihp drivers to override the ACPI_PCI_SLOT name.
595 */
596 if (strcmp(kobject_name(&pci_slot->kobj), slot->name)) {
597 result = kobject_rename(&pci_slot->kobj, slot->name);
598 if (result) {
599 pci_destroy_slot(pci_slot);
600 return result;
601 }
602 }
603
604 spin_lock(&pci_hotplug_slot_list_lock);
605 list_add(&slot->slot_list, &pci_hotplug_slot_list); 586 list_add(&slot->slot_list, &pci_hotplug_slot_list);
606 spin_unlock(&pci_hotplug_slot_list_lock);
607 587
608 result = fs_add_slot(pci_slot); 588 result = fs_add_slot(pci_slot);
609 kobject_uevent(&pci_slot->kobj, KOBJ_ADD); 589 kobject_uevent(&pci_slot->kobj, KOBJ_ADD);
610 dbg("Added slot %s to the list\n", slot->name); 590 dbg("Added slot %s to the list\n", name);
611 591out:
612 592 mutex_unlock(&pci_hp_mutex);
613 return result; 593 return result;
614} 594}
615 595
@@ -630,21 +610,23 @@ int pci_hp_deregister(struct hotplug_slot *hotplug)
630 if (!hotplug) 610 if (!hotplug)
631 return -ENODEV; 611 return -ENODEV;
632 612
633 temp = get_slot_from_name(hotplug->name); 613 mutex_lock(&pci_hp_mutex);
634 if (temp != hotplug) 614 temp = get_slot_from_name(hotplug_slot_name(hotplug));
615 if (temp != hotplug) {
616 mutex_unlock(&pci_hp_mutex);
635 return -ENODEV; 617 return -ENODEV;
618 }
636 619
637 spin_lock(&pci_hotplug_slot_list_lock);
638 list_del(&hotplug->slot_list); 620 list_del(&hotplug->slot_list);
639 spin_unlock(&pci_hotplug_slot_list_lock);
640 621
641 slot = hotplug->pci_slot; 622 slot = hotplug->pci_slot;
642 fs_remove_slot(slot); 623 fs_remove_slot(slot);
643 dbg("Removed slot %s from the list\n", hotplug->name); 624 dbg("Removed slot %s from the list\n", hotplug_slot_name(hotplug));
644 625
645 hotplug->release(hotplug); 626 hotplug->release(hotplug);
646 slot->hotplug = NULL; 627 slot->hotplug = NULL;
647 pci_destroy_slot(slot); 628 pci_destroy_slot(slot);
629 mutex_unlock(&pci_hp_mutex);
648 630
649 return 0; 631 return 0;
650} 632}
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index c367978bd7fe..a4817a841fae 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -74,15 +74,13 @@ extern struct workqueue_struct *pciehp_wq;
74struct slot { 74struct slot {
75 u8 bus; 75 u8 bus;
76 u8 device; 76 u8 device;
77 u32 number;
78 u8 state; 77 u8 state;
79 struct timer_list task_event;
80 u8 hp_slot; 78 u8 hp_slot;
79 u32 number;
81 struct controller *ctrl; 80 struct controller *ctrl;
82 struct hpc_ops *hpc_ops; 81 struct hpc_ops *hpc_ops;
83 struct hotplug_slot *hotplug_slot; 82 struct hotplug_slot *hotplug_slot;
84 struct list_head slot_list; 83 struct list_head slot_list;
85 char name[SLOT_NAME_SIZE];
86 unsigned long last_emi_toggle; 84 unsigned long last_emi_toggle;
87 struct delayed_work work; /* work for button event */ 85 struct delayed_work work; /* work for button event */
88 struct mutex lock; 86 struct mutex lock;
@@ -112,6 +110,7 @@ struct controller {
112 struct timer_list poll_timer; 110 struct timer_list poll_timer;
113 int cmd_busy; 111 int cmd_busy;
114 unsigned int no_cmd_complete:1; 112 unsigned int no_cmd_complete:1;
113 unsigned int link_active_reporting:1;
115}; 114};
116 115
117#define INT_BUTTON_IGNORE 0 116#define INT_BUTTON_IGNORE 0
@@ -175,6 +174,11 @@ int pciehp_enable_slot(struct slot *p_slot);
175int pciehp_disable_slot(struct slot *p_slot); 174int pciehp_disable_slot(struct slot *p_slot);
176int pcie_enable_notification(struct controller *ctrl); 175int pcie_enable_notification(struct controller *ctrl);
177 176
177static inline const char *slot_name(struct slot *slot)
178{
179 return hotplug_slot_name(slot->hotplug_slot);
180}
181
178static inline struct slot *pciehp_find_slot(struct controller *ctrl, u8 device) 182static inline struct slot *pciehp_find_slot(struct controller *ctrl, u8 device)
179{ 183{
180 struct slot *slot; 184 struct slot *slot;
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index c748a19db89d..62be1b59c74b 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -185,7 +185,7 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
185 struct slot *slot = hotplug_slot->private; 185 struct slot *slot = hotplug_slot->private;
186 186
187 ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n", 187 ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n",
188 __func__, hotplug_slot->name); 188 __func__, hotplug_slot_name(hotplug_slot));
189 189
190 kfree(hotplug_slot->info); 190 kfree(hotplug_slot->info);
191 kfree(hotplug_slot); 191 kfree(hotplug_slot);
@@ -196,7 +196,7 @@ static int init_slots(struct controller *ctrl)
196 struct slot *slot; 196 struct slot *slot;
197 struct hotplug_slot *hotplug_slot; 197 struct hotplug_slot *hotplug_slot;
198 struct hotplug_slot_info *info; 198 struct hotplug_slot_info *info;
199 int len, dup = 1; 199 char name[SLOT_NAME_SIZE];
200 int retval = -ENOMEM; 200 int retval = -ENOMEM;
201 201
202 list_for_each_entry(slot, &ctrl->slot_list, slot_list) { 202 list_for_each_entry(slot, &ctrl->slot_list, slot_list) {
@@ -210,41 +210,28 @@ static int init_slots(struct controller *ctrl)
210 210
211 /* register this slot with the hotplug pci core */ 211 /* register this slot with the hotplug pci core */
212 hotplug_slot->info = info; 212 hotplug_slot->info = info;
213 hotplug_slot->name = slot->name;
214 hotplug_slot->private = slot; 213 hotplug_slot->private = slot;
215 hotplug_slot->release = &release_slot; 214 hotplug_slot->release = &release_slot;
216 hotplug_slot->ops = &pciehp_hotplug_slot_ops; 215 hotplug_slot->ops = &pciehp_hotplug_slot_ops;
217 get_power_status(hotplug_slot, &info->power_status);
218 get_attention_status(hotplug_slot, &info->attention_status);
219 get_latch_status(hotplug_slot, &info->latch_status);
220 get_adapter_status(hotplug_slot, &info->adapter_status);
221 slot->hotplug_slot = hotplug_slot; 216 slot->hotplug_slot = hotplug_slot;
217 snprintf(name, SLOT_NAME_SIZE, "%u", slot->number);
222 218
223 ctrl_dbg(ctrl, "Registering bus=%x dev=%x hp_slot=%x sun=%x " 219 ctrl_dbg(ctrl, "Registering bus=%x dev=%x hp_slot=%x sun=%x "
224 "slot_device_offset=%x\n", slot->bus, slot->device, 220 "slot_device_offset=%x\n", slot->bus, slot->device,
225 slot->hp_slot, slot->number, ctrl->slot_device_offset); 221 slot->hp_slot, slot->number, ctrl->slot_device_offset);
226duplicate_name:
227 retval = pci_hp_register(hotplug_slot, 222 retval = pci_hp_register(hotplug_slot,
228 ctrl->pci_dev->subordinate, 223 ctrl->pci_dev->subordinate,
229 slot->device); 224 slot->device,
225 name);
230 if (retval) { 226 if (retval) {
231 /*
232 * If slot N already exists, we'll try to create
233 * slot N-1, N-2 ... N-M, until we overflow.
234 */
235 if (retval == -EEXIST) {
236 len = snprintf(slot->name, SLOT_NAME_SIZE,
237 "%d-%d", slot->number, dup++);
238 if (len < SLOT_NAME_SIZE)
239 goto duplicate_name;
240 else
241 ctrl_err(ctrl, "duplicate slot name "
242 "overflow\n");
243 }
244 ctrl_err(ctrl, "pci_hp_register failed with error %d\n", 227 ctrl_err(ctrl, "pci_hp_register failed with error %d\n",
245 retval); 228 retval);
246 goto error_info; 229 goto error_info;
247 } 230 }
231 get_power_status(hotplug_slot, &info->power_status);
232 get_attention_status(hotplug_slot, &info->attention_status);
233 get_latch_status(hotplug_slot, &info->latch_status);
234 get_adapter_status(hotplug_slot, &info->adapter_status);
248 /* create additional sysfs entries */ 235 /* create additional sysfs entries */
249 if (EMI(ctrl)) { 236 if (EMI(ctrl)) {
250 retval = sysfs_create_file(&hotplug_slot->pci_slot->kobj, 237 retval = sysfs_create_file(&hotplug_slot->pci_slot->kobj,
@@ -287,7 +274,7 @@ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
287 struct slot *slot = hotplug_slot->private; 274 struct slot *slot = hotplug_slot->private;
288 275
289 ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n", 276 ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n",
290 __func__, hotplug_slot->name); 277 __func__, slot_name(slot));
291 278
292 hotplug_slot->info->attention_status = status; 279 hotplug_slot->info->attention_status = status;
293 280
@@ -303,7 +290,7 @@ static int enable_slot(struct hotplug_slot *hotplug_slot)
303 struct slot *slot = hotplug_slot->private; 290 struct slot *slot = hotplug_slot->private;
304 291
305 ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n", 292 ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n",
306 __func__, hotplug_slot->name); 293 __func__, slot_name(slot));
307 294
308 return pciehp_sysfs_enable_slot(slot); 295 return pciehp_sysfs_enable_slot(slot);
309} 296}
@@ -314,7 +301,7 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
314 struct slot *slot = hotplug_slot->private; 301 struct slot *slot = hotplug_slot->private;
315 302
316 ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n", 303 ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n",
317 __func__, hotplug_slot->name); 304 __func__, slot_name(slot));
318 305
319 return pciehp_sysfs_disable_slot(slot); 306 return pciehp_sysfs_disable_slot(slot);
320} 307}
@@ -325,7 +312,7 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
325 int retval; 312 int retval;
326 313
327 ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n", 314 ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n",
328 __func__, hotplug_slot->name); 315 __func__, slot_name(slot));
329 316
330 retval = slot->hpc_ops->get_power_status(slot, value); 317 retval = slot->hpc_ops->get_power_status(slot, value);
331 if (retval < 0) 318 if (retval < 0)
@@ -340,7 +327,7 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
340 int retval; 327 int retval;
341 328
342 ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n", 329 ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n",
343 __func__, hotplug_slot->name); 330 __func__, slot_name(slot));
344 331
345 retval = slot->hpc_ops->get_attention_status(slot, value); 332 retval = slot->hpc_ops->get_attention_status(slot, value);
346 if (retval < 0) 333 if (retval < 0)
@@ -355,7 +342,7 @@ static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
355 int retval; 342 int retval;
356 343
357 ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n", 344 ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n",
358 __func__, hotplug_slot->name); 345 __func__, slot_name(slot));
359 346
360 retval = slot->hpc_ops->get_latch_status(slot, value); 347 retval = slot->hpc_ops->get_latch_status(slot, value);
361 if (retval < 0) 348 if (retval < 0)
@@ -370,7 +357,7 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
370 int retval; 357 int retval;
371 358
372 ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n", 359 ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n",
373 __func__, hotplug_slot->name); 360 __func__, slot_name(slot));
374 361
375 retval = slot->hpc_ops->get_adapter_status(slot, value); 362 retval = slot->hpc_ops->get_adapter_status(slot, value);
376 if (retval < 0) 363 if (retval < 0)
@@ -386,7 +373,7 @@ static int get_max_bus_speed(struct hotplug_slot *hotplug_slot,
386 int retval; 373 int retval;
387 374
388 ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n", 375 ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n",
389 __func__, hotplug_slot->name); 376 __func__, slot_name(slot));
390 377
391 retval = slot->hpc_ops->get_max_bus_speed(slot, value); 378 retval = slot->hpc_ops->get_max_bus_speed(slot, value);
392 if (retval < 0) 379 if (retval < 0)
@@ -401,7 +388,7 @@ static int get_cur_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_spe
401 int retval; 388 int retval;
402 389
403 ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n", 390 ctrl_dbg(slot->ctrl, "%s - physical_slot = %s\n",
404 __func__, hotplug_slot->name); 391 __func__, slot_name(slot));
405 392
406 retval = slot->hpc_ops->get_cur_bus_speed(slot, value); 393 retval = slot->hpc_ops->get_cur_bus_speed(slot, value);
407 if (retval < 0) 394 if (retval < 0)
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index acb7f9efd182..d6c5eb297753 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -66,7 +66,7 @@ u8 pciehp_handle_attention_button(struct slot *p_slot)
66 /* 66 /*
67 * Button pressed - See if need to TAKE ACTION!!! 67 * Button pressed - See if need to TAKE ACTION!!!
68 */ 68 */
69 ctrl_info(ctrl, "Button pressed on Slot(%s)\n", p_slot->name); 69 ctrl_info(ctrl, "Button pressed on Slot(%s)\n", slot_name(p_slot));
70 event_type = INT_BUTTON_PRESS; 70 event_type = INT_BUTTON_PRESS;
71 71
72 queue_interrupt_event(p_slot, event_type); 72 queue_interrupt_event(p_slot, event_type);
@@ -88,13 +88,13 @@ u8 pciehp_handle_switch_change(struct slot *p_slot)
88 /* 88 /*
89 * Switch opened 89 * Switch opened
90 */ 90 */
91 ctrl_info(ctrl, "Latch open on Slot(%s)\n", p_slot->name); 91 ctrl_info(ctrl, "Latch open on Slot(%s)\n", slot_name(p_slot));
92 event_type = INT_SWITCH_OPEN; 92 event_type = INT_SWITCH_OPEN;
93 } else { 93 } else {
94 /* 94 /*
95 * Switch closed 95 * Switch closed
96 */ 96 */
97 ctrl_info(ctrl, "Latch close on Slot(%s)\n", p_slot->name); 97 ctrl_info(ctrl, "Latch close on Slot(%s)\n", slot_name(p_slot));
98 event_type = INT_SWITCH_CLOSE; 98 event_type = INT_SWITCH_CLOSE;
99 } 99 }
100 100
@@ -120,13 +120,14 @@ u8 pciehp_handle_presence_change(struct slot *p_slot)
120 /* 120 /*
121 * Card Present 121 * Card Present
122 */ 122 */
123 ctrl_info(ctrl, "Card present on Slot(%s)\n", p_slot->name); 123 ctrl_info(ctrl, "Card present on Slot(%s)\n", slot_name(p_slot));
124 event_type = INT_PRESENCE_ON; 124 event_type = INT_PRESENCE_ON;
125 } else { 125 } else {
126 /* 126 /*
127 * Not Present 127 * Not Present
128 */ 128 */
129 ctrl_info(ctrl, "Card not present on Slot(%s)\n", p_slot->name); 129 ctrl_info(ctrl, "Card not present on Slot(%s)\n",
130 slot_name(p_slot));
130 event_type = INT_PRESENCE_OFF; 131 event_type = INT_PRESENCE_OFF;
131 } 132 }
132 133
@@ -148,13 +149,13 @@ u8 pciehp_handle_power_fault(struct slot *p_slot)
148 * power fault Cleared 149 * power fault Cleared
149 */ 150 */
150 ctrl_info(ctrl, "Power fault cleared on Slot(%s)\n", 151 ctrl_info(ctrl, "Power fault cleared on Slot(%s)\n",
151 p_slot->name); 152 slot_name(p_slot));
152 event_type = INT_POWER_FAULT_CLEAR; 153 event_type = INT_POWER_FAULT_CLEAR;
153 } else { 154 } else {
154 /* 155 /*
155 * power fault 156 * power fault
156 */ 157 */
157 ctrl_info(ctrl, "Power fault on Slot(%s)\n", p_slot->name); 158 ctrl_info(ctrl, "Power fault on Slot(%s)\n", slot_name(p_slot));
158 event_type = INT_POWER_FAULT; 159 event_type = INT_POWER_FAULT;
159 ctrl_info(ctrl, "power fault bit %x set\n", 0); 160 ctrl_info(ctrl, "power fault bit %x set\n", 0);
160 } 161 }
@@ -225,9 +226,6 @@ static int board_added(struct slot *p_slot)
225 if (PWR_LED(ctrl)) 226 if (PWR_LED(ctrl))
226 p_slot->hpc_ops->green_led_blink(p_slot); 227 p_slot->hpc_ops->green_led_blink(p_slot);
227 228
228 /* Wait for ~1 second */
229 msleep(1000);
230
231 /* Check link training status */ 229 /* Check link training status */
232 retval = p_slot->hpc_ops->check_lnk_status(ctrl); 230 retval = p_slot->hpc_ops->check_lnk_status(ctrl);
233 if (retval) { 231 if (retval) {
@@ -412,12 +410,12 @@ static void handle_button_press_event(struct slot *p_slot)
412 p_slot->state = BLINKINGOFF_STATE; 410 p_slot->state = BLINKINGOFF_STATE;
413 ctrl_info(ctrl, 411 ctrl_info(ctrl,
414 "PCI slot #%s - powering off due to button " 412 "PCI slot #%s - powering off due to button "
415 "press.\n", p_slot->name); 413 "press.\n", slot_name(p_slot));
416 } else { 414 } else {
417 p_slot->state = BLINKINGON_STATE; 415 p_slot->state = BLINKINGON_STATE;
418 ctrl_info(ctrl, 416 ctrl_info(ctrl,
419 "PCI slot #%s - powering on due to button " 417 "PCI slot #%s - powering on due to button "
420 "press.\n", p_slot->name); 418 "press.\n", slot_name(p_slot));
421 } 419 }
422 /* blink green LED and turn off amber */ 420 /* blink green LED and turn off amber */
423 if (PWR_LED(ctrl)) 421 if (PWR_LED(ctrl))
@@ -434,7 +432,7 @@ static void handle_button_press_event(struct slot *p_slot)
434 * press the attention again before the 5 sec. limit 432 * press the attention again before the 5 sec. limit
435 * expires to cancel hot-add or hot-remove 433 * expires to cancel hot-add or hot-remove
436 */ 434 */
437 ctrl_info(ctrl, "Button cancel on Slot(%s)\n", p_slot->name); 435 ctrl_info(ctrl, "Button cancel on Slot(%s)\n", slot_name(p_slot));
438 ctrl_dbg(ctrl, "%s: button cancel\n", __func__); 436 ctrl_dbg(ctrl, "%s: button cancel\n", __func__);
439 cancel_delayed_work(&p_slot->work); 437 cancel_delayed_work(&p_slot->work);
440 if (p_slot->state == BLINKINGOFF_STATE) { 438 if (p_slot->state == BLINKINGOFF_STATE) {
@@ -447,7 +445,7 @@ static void handle_button_press_event(struct slot *p_slot)
447 if (ATTN_LED(ctrl)) 445 if (ATTN_LED(ctrl))
448 p_slot->hpc_ops->set_attention_status(p_slot, 0); 446 p_slot->hpc_ops->set_attention_status(p_slot, 0);
449 ctrl_info(ctrl, "PCI slot #%s - action canceled " 447 ctrl_info(ctrl, "PCI slot #%s - action canceled "
450 "due to button press\n", p_slot->name); 448 "due to button press\n", slot_name(p_slot));
451 p_slot->state = STATIC_STATE; 449 p_slot->state = STATIC_STATE;
452 break; 450 break;
453 case POWEROFF_STATE: 451 case POWEROFF_STATE:
@@ -457,7 +455,7 @@ static void handle_button_press_event(struct slot *p_slot)
457 * this means that the previous attention button action 455 * this means that the previous attention button action
458 * to hot-add or hot-remove is undergoing 456 * to hot-add or hot-remove is undergoing
459 */ 457 */
460 ctrl_info(ctrl, "Button ignore on Slot(%s)\n", p_slot->name); 458 ctrl_info(ctrl, "Button ignore on Slot(%s)\n", slot_name(p_slot));
461 update_slot_info(p_slot); 459 update_slot_info(p_slot);
462 break; 460 break;
463 default: 461 default:
@@ -540,7 +538,7 @@ int pciehp_enable_slot(struct slot *p_slot)
540 rc = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus); 538 rc = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus);
541 if (rc || !getstatus) { 539 if (rc || !getstatus) {
542 ctrl_info(ctrl, "%s: no adapter on slot(%s)\n", 540 ctrl_info(ctrl, "%s: no adapter on slot(%s)\n",
543 __func__, p_slot->name); 541 __func__, slot_name(p_slot));
544 mutex_unlock(&p_slot->ctrl->crit_sect); 542 mutex_unlock(&p_slot->ctrl->crit_sect);
545 return -ENODEV; 543 return -ENODEV;
546 } 544 }
@@ -548,7 +546,7 @@ int pciehp_enable_slot(struct slot *p_slot)
548 rc = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); 546 rc = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
549 if (rc || getstatus) { 547 if (rc || getstatus) {
550 ctrl_info(ctrl, "%s: latch open on slot(%s)\n", 548 ctrl_info(ctrl, "%s: latch open on slot(%s)\n",
551 __func__, p_slot->name); 549 __func__, slot_name(p_slot));
552 mutex_unlock(&p_slot->ctrl->crit_sect); 550 mutex_unlock(&p_slot->ctrl->crit_sect);
553 return -ENODEV; 551 return -ENODEV;
554 } 552 }
@@ -558,7 +556,7 @@ int pciehp_enable_slot(struct slot *p_slot)
558 rc = p_slot->hpc_ops->get_power_status(p_slot, &getstatus); 556 rc = p_slot->hpc_ops->get_power_status(p_slot, &getstatus);
559 if (rc || getstatus) { 557 if (rc || getstatus) {
560 ctrl_info(ctrl, "%s: already enabled on slot(%s)\n", 558 ctrl_info(ctrl, "%s: already enabled on slot(%s)\n",
561 __func__, p_slot->name); 559 __func__, slot_name(p_slot));
562 mutex_unlock(&p_slot->ctrl->crit_sect); 560 mutex_unlock(&p_slot->ctrl->crit_sect);
563 return -EINVAL; 561 return -EINVAL;
564 } 562 }
@@ -594,7 +592,7 @@ int pciehp_disable_slot(struct slot *p_slot)
594 ret = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus); 592 ret = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus);
595 if (ret || !getstatus) { 593 if (ret || !getstatus) {
596 ctrl_info(ctrl, "%s: no adapter on slot(%s)\n", 594 ctrl_info(ctrl, "%s: no adapter on slot(%s)\n",
597 __func__, p_slot->name); 595 __func__, slot_name(p_slot));
598 mutex_unlock(&p_slot->ctrl->crit_sect); 596 mutex_unlock(&p_slot->ctrl->crit_sect);
599 return -ENODEV; 597 return -ENODEV;
600 } 598 }
@@ -604,7 +602,7 @@ int pciehp_disable_slot(struct slot *p_slot)
604 ret = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); 602 ret = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
605 if (ret || getstatus) { 603 if (ret || getstatus) {
606 ctrl_info(ctrl, "%s: latch open on slot(%s)\n", 604 ctrl_info(ctrl, "%s: latch open on slot(%s)\n",
607 __func__, p_slot->name); 605 __func__, slot_name(p_slot));
608 mutex_unlock(&p_slot->ctrl->crit_sect); 606 mutex_unlock(&p_slot->ctrl->crit_sect);
609 return -ENODEV; 607 return -ENODEV;
610 } 608 }
@@ -614,7 +612,7 @@ int pciehp_disable_slot(struct slot *p_slot)
614 ret = p_slot->hpc_ops->get_power_status(p_slot, &getstatus); 612 ret = p_slot->hpc_ops->get_power_status(p_slot, &getstatus);
615 if (ret || !getstatus) { 613 if (ret || !getstatus) {
616 ctrl_info(ctrl, "%s: already disabled slot(%s)\n", 614 ctrl_info(ctrl, "%s: already disabled slot(%s)\n",
617 __func__, p_slot->name); 615 __func__, slot_name(p_slot));
618 mutex_unlock(&p_slot->ctrl->crit_sect); 616 mutex_unlock(&p_slot->ctrl->crit_sect);
619 return -EINVAL; 617 return -EINVAL;
620 } 618 }
@@ -645,14 +643,16 @@ int pciehp_sysfs_enable_slot(struct slot *p_slot)
645 break; 643 break;
646 case POWERON_STATE: 644 case POWERON_STATE:
647 ctrl_info(ctrl, "Slot %s is already in powering on state\n", 645 ctrl_info(ctrl, "Slot %s is already in powering on state\n",
648 p_slot->name); 646 slot_name(p_slot));
649 break; 647 break;
650 case BLINKINGOFF_STATE: 648 case BLINKINGOFF_STATE:
651 case POWEROFF_STATE: 649 case POWEROFF_STATE:
652 ctrl_info(ctrl, "Already enabled on slot %s\n", p_slot->name); 650 ctrl_info(ctrl, "Already enabled on slot %s\n",
651 slot_name(p_slot));
653 break; 652 break;
654 default: 653 default:
655 ctrl_err(ctrl, "Not a valid state on slot %s\n", p_slot->name); 654 ctrl_err(ctrl, "Not a valid state on slot %s\n",
655 slot_name(p_slot));
656 break; 656 break;
657 } 657 }
658 mutex_unlock(&p_slot->lock); 658 mutex_unlock(&p_slot->lock);
@@ -678,14 +678,16 @@ int pciehp_sysfs_disable_slot(struct slot *p_slot)
678 break; 678 break;
679 case POWEROFF_STATE: 679 case POWEROFF_STATE:
680 ctrl_info(ctrl, "Slot %s is already in powering off state\n", 680 ctrl_info(ctrl, "Slot %s is already in powering off state\n",
681 p_slot->name); 681 slot_name(p_slot));
682 break; 682 break;
683 case BLINKINGON_STATE: 683 case BLINKINGON_STATE:
684 case POWERON_STATE: 684 case POWERON_STATE:
685 ctrl_info(ctrl, "Already disabled on slot %s\n", p_slot->name); 685 ctrl_info(ctrl, "Already disabled on slot %s\n",
686 slot_name(p_slot));
686 break; 687 break;
687 default: 688 default:
688 ctrl_err(ctrl, "Not a valid state on slot %s\n", p_slot->name); 689 ctrl_err(ctrl, "Not a valid state on slot %s\n",
690 slot_name(p_slot));
689 break; 691 break;
690 } 692 }
691 mutex_unlock(&p_slot->lock); 693 mutex_unlock(&p_slot->lock);
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 8e9530c4c36d..58c72d2cc217 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -125,6 +125,7 @@ static inline int pciehp_writel(struct controller *ctrl, int reg, u32 value)
125/* Field definitions in Link Capabilities Register */ 125/* Field definitions in Link Capabilities Register */
126#define MAX_LNK_SPEED 0x000F 126#define MAX_LNK_SPEED 0x000F
127#define MAX_LNK_WIDTH 0x03F0 127#define MAX_LNK_WIDTH 0x03F0
128#define LINK_ACTIVE_REPORTING 0x00100000
128 129
129/* Link Width Encoding */ 130/* Link Width Encoding */
130#define LNK_X1 0x01 131#define LNK_X1 0x01
@@ -141,6 +142,7 @@ static inline int pciehp_writel(struct controller *ctrl, int reg, u32 value)
141#define LNK_TRN_ERR 0x0400 142#define LNK_TRN_ERR 0x0400
142#define LNK_TRN 0x0800 143#define LNK_TRN 0x0800
143#define SLOT_CLK_CONF 0x1000 144#define SLOT_CLK_CONF 0x1000
145#define LINK_ACTIVE 0x2000
144 146
145/* Field definitions in Slot Capabilities Register */ 147/* Field definitions in Slot Capabilities Register */
146#define ATTN_BUTTN_PRSN 0x00000001 148#define ATTN_BUTTN_PRSN 0x00000001
@@ -368,11 +370,52 @@ static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
368 return retval; 370 return retval;
369} 371}
370 372
373static inline int check_link_active(struct controller *ctrl)
374{
375 u16 link_status;
376
377 if (pciehp_readw(ctrl, LNKSTATUS, &link_status))
378 return 0;
379 return !!(link_status & LINK_ACTIVE);
380}
381
382static void pcie_wait_link_active(struct controller *ctrl)
383{
384 int timeout = 1000;
385
386 if (check_link_active(ctrl))
387 return;
388 while (timeout > 0) {
389 msleep(10);
390 timeout -= 10;
391 if (check_link_active(ctrl))
392 return;
393 }
394 ctrl_dbg(ctrl, "Data Link Layer Link Active not set in 1000 msec\n");
395}
396
371static int hpc_check_lnk_status(struct controller *ctrl) 397static int hpc_check_lnk_status(struct controller *ctrl)
372{ 398{
373 u16 lnk_status; 399 u16 lnk_status;
374 int retval = 0; 400 int retval = 0;
375 401
402 /*
403 * Data Link Layer Link Active Reporting must be capable for
404 * hot-plug capable downstream port. But old controller might
405 * not implement it. In this case, we wait for 1000 ms.
406 */
407 if (ctrl->link_active_reporting){
408 /* Wait for Data Link Layer Link Active bit to be set */
409 pcie_wait_link_active(ctrl);
410 /*
411 * We must wait for 100 ms after the Data Link Layer
412 * Link Active bit reads 1b before initiating a
413 * configuration access to the hot added device.
414 */
415 msleep(100);
416 } else
417 msleep(1000);
418
376 retval = pciehp_readw(ctrl, LNKSTATUS, &lnk_status); 419 retval = pciehp_readw(ctrl, LNKSTATUS, &lnk_status);
377 if (retval) { 420 if (retval) {
378 ctrl_err(ctrl, "%s: Cannot read LNKSTATUS register\n", 421 ctrl_err(ctrl, "%s: Cannot read LNKSTATUS register\n",
@@ -1061,7 +1104,6 @@ static int pcie_init_slot(struct controller *ctrl)
1061 slot->device = ctrl->slot_device_offset + slot->hp_slot; 1104 slot->device = ctrl->slot_device_offset + slot->hp_slot;
1062 slot->hpc_ops = ctrl->hpc_ops; 1105 slot->hpc_ops = ctrl->hpc_ops;
1063 slot->number = ctrl->first_slot; 1106 slot->number = ctrl->first_slot;
1064 snprintf(slot->name, SLOT_NAME_SIZE, "%d", slot->number);
1065 mutex_init(&slot->lock); 1107 mutex_init(&slot->lock);
1066 INIT_DELAYED_WORK(&slot->work, pciehp_queue_pushbutton_work); 1108 INIT_DELAYED_WORK(&slot->work, pciehp_queue_pushbutton_work);
1067 list_add(&slot->slot_list, &ctrl->slot_list); 1109 list_add(&slot->slot_list, &ctrl->slot_list);
@@ -1132,7 +1174,7 @@ static inline void dbg_ctrl(struct controller *ctrl)
1132struct controller *pcie_init(struct pcie_device *dev) 1174struct controller *pcie_init(struct pcie_device *dev)
1133{ 1175{
1134 struct controller *ctrl; 1176 struct controller *ctrl;
1135 u32 slot_cap; 1177 u32 slot_cap, link_cap;
1136 struct pci_dev *pdev = dev->port; 1178 struct pci_dev *pdev = dev->port;
1137 1179
1138 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); 1180 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
@@ -1148,11 +1190,11 @@ struct controller *pcie_init(struct pcie_device *dev)
1148 if (!ctrl->cap_base) { 1190 if (!ctrl->cap_base) {
1149 ctrl_err(ctrl, "%s: Cannot find PCI Express capability\n", 1191 ctrl_err(ctrl, "%s: Cannot find PCI Express capability\n",
1150 __func__); 1192 __func__);
1151 goto abort; 1193 goto abort_ctrl;
1152 } 1194 }
1153 if (pciehp_readl(ctrl, SLOTCAP, &slot_cap)) { 1195 if (pciehp_readl(ctrl, SLOTCAP, &slot_cap)) {
1154 ctrl_err(ctrl, "%s: Cannot read SLOTCAP register\n", __func__); 1196 ctrl_err(ctrl, "%s: Cannot read SLOTCAP register\n", __func__);
1155 goto abort; 1197 goto abort_ctrl;
1156 } 1198 }
1157 1199
1158 ctrl->slot_cap = slot_cap; 1200 ctrl->slot_cap = slot_cap;
@@ -1174,6 +1216,16 @@ struct controller *pcie_init(struct pcie_device *dev)
1174 !(POWER_CTRL(ctrl) | ATTN_LED(ctrl) | PWR_LED(ctrl) | EMI(ctrl))) 1216 !(POWER_CTRL(ctrl) | ATTN_LED(ctrl) | PWR_LED(ctrl) | EMI(ctrl)))
1175 ctrl->no_cmd_complete = 1; 1217 ctrl->no_cmd_complete = 1;
1176 1218
1219 /* Check if Data Link Layer Link Active Reporting is implemented */
1220 if (pciehp_readl(ctrl, LNKCAP, &link_cap)) {
1221 ctrl_err(ctrl, "%s: Cannot read LNKCAP register\n", __func__);
1222 goto abort_ctrl;
1223 }
1224 if (link_cap & LINK_ACTIVE_REPORTING) {
1225 ctrl_dbg(ctrl, "Link Active Reporting supported\n");
1226 ctrl->link_active_reporting = 1;
1227 }
1228
1177 /* Clear all remaining event bits in Slot Status register */ 1229 /* Clear all remaining event bits in Slot Status register */
1178 if (pciehp_writew(ctrl, SLOTSTATUS, 0x1f)) 1230 if (pciehp_writew(ctrl, SLOTSTATUS, 0x1f))
1179 goto abort_ctrl; 1231 goto abort_ctrl;
diff --git a/drivers/pci/hotplug/rpaphp_slot.c b/drivers/pci/hotplug/rpaphp_slot.c
index 50884507b8be..2ea9cf1a8d02 100644
--- a/drivers/pci/hotplug/rpaphp_slot.c
+++ b/drivers/pci/hotplug/rpaphp_slot.c
@@ -43,7 +43,7 @@ static void rpaphp_release_slot(struct hotplug_slot *hotplug_slot)
43void dealloc_slot_struct(struct slot *slot) 43void dealloc_slot_struct(struct slot *slot)
44{ 44{
45 kfree(slot->hotplug_slot->info); 45 kfree(slot->hotplug_slot->info);
46 kfree(slot->hotplug_slot->name); 46 kfree(slot->name);
47 kfree(slot->hotplug_slot); 47 kfree(slot->hotplug_slot);
48 kfree(slot); 48 kfree(slot);
49} 49}
@@ -63,11 +63,9 @@ struct slot *alloc_slot_struct(struct device_node *dn,
63 GFP_KERNEL); 63 GFP_KERNEL);
64 if (!slot->hotplug_slot->info) 64 if (!slot->hotplug_slot->info)
65 goto error_hpslot; 65 goto error_hpslot;
66 slot->hotplug_slot->name = kmalloc(strlen(drc_name) + 1, GFP_KERNEL); 66 slot->name = kstrdup(drc_name, GFP_KERNEL);
67 if (!slot->hotplug_slot->name) 67 if (!slot->name)
68 goto error_info; 68 goto error_info;
69 slot->name = slot->hotplug_slot->name;
70 strcpy(slot->name, drc_name);
71 slot->dn = dn; 69 slot->dn = dn;
72 slot->index = drc_index; 70 slot->index = drc_index;
73 slot->power_domain = power_domain; 71 slot->power_domain = power_domain;
@@ -137,7 +135,7 @@ int rpaphp_register_slot(struct slot *slot)
137 slotno = PCI_SLOT(PCI_DN(slot->dn->child)->devfn); 135 slotno = PCI_SLOT(PCI_DN(slot->dn->child)->devfn);
138 else 136 else
139 slotno = -1; 137 slotno = -1;
140 retval = pci_hp_register(php_slot, slot->bus, slotno); 138 retval = pci_hp_register(php_slot, slot->bus, slotno, slot->name);
141 if (retval) { 139 if (retval) {
142 err("pci_hp_register failed with error %d\n", retval); 140 err("pci_hp_register failed with error %d\n", retval);
143 return retval; 141 return retval;
diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c
index 410fe0394a8e..d748698e31a2 100644
--- a/drivers/pci/hotplug/sgi_hotplug.c
+++ b/drivers/pci/hotplug/sgi_hotplug.c
@@ -161,7 +161,8 @@ static int sn_pci_bus_valid(struct pci_bus *pci_bus)
161} 161}
162 162
163static int sn_hp_slot_private_alloc(struct hotplug_slot *bss_hotplug_slot, 163static int sn_hp_slot_private_alloc(struct hotplug_slot *bss_hotplug_slot,
164 struct pci_bus *pci_bus, int device) 164 struct pci_bus *pci_bus, int device,
165 char *name)
165{ 166{
166 struct pcibus_info *pcibus_info; 167 struct pcibus_info *pcibus_info;
167 struct slot *slot; 168 struct slot *slot;
@@ -173,15 +174,9 @@ static int sn_hp_slot_private_alloc(struct hotplug_slot *bss_hotplug_slot,
173 return -ENOMEM; 174 return -ENOMEM;
174 bss_hotplug_slot->private = slot; 175 bss_hotplug_slot->private = slot;
175 176
176 bss_hotplug_slot->name = kmalloc(SN_SLOT_NAME_SIZE, GFP_KERNEL);
177 if (!bss_hotplug_slot->name) {
178 kfree(bss_hotplug_slot->private);
179 return -ENOMEM;
180 }
181
182 slot->device_num = device; 177 slot->device_num = device;
183 slot->pci_bus = pci_bus; 178 slot->pci_bus = pci_bus;
184 sprintf(bss_hotplug_slot->name, "%04x:%02x:%02x", 179 sprintf(name, "%04x:%02x:%02x",
185 pci_domain_nr(pci_bus), 180 pci_domain_nr(pci_bus),
186 ((u16)pcibus_info->pbi_buscommon.bs_persist_busnum), 181 ((u16)pcibus_info->pbi_buscommon.bs_persist_busnum),
187 device + 1); 182 device + 1);
@@ -608,7 +603,6 @@ static inline int get_power_status(struct hotplug_slot *bss_hotplug_slot,
608static void sn_release_slot(struct hotplug_slot *bss_hotplug_slot) 603static void sn_release_slot(struct hotplug_slot *bss_hotplug_slot)
609{ 604{
610 kfree(bss_hotplug_slot->info); 605 kfree(bss_hotplug_slot->info);
611 kfree(bss_hotplug_slot->name);
612 kfree(bss_hotplug_slot->private); 606 kfree(bss_hotplug_slot->private);
613 kfree(bss_hotplug_slot); 607 kfree(bss_hotplug_slot);
614} 608}
@@ -618,6 +612,7 @@ static int sn_hotplug_slot_register(struct pci_bus *pci_bus)
618 int device; 612 int device;
619 struct pci_slot *pci_slot; 613 struct pci_slot *pci_slot;
620 struct hotplug_slot *bss_hotplug_slot; 614 struct hotplug_slot *bss_hotplug_slot;
615 char name[SN_SLOT_NAME_SIZE];
621 int rc = 0; 616 int rc = 0;
622 617
623 /* 618 /*
@@ -645,15 +640,14 @@ static int sn_hotplug_slot_register(struct pci_bus *pci_bus)
645 } 640 }
646 641
647 if (sn_hp_slot_private_alloc(bss_hotplug_slot, 642 if (sn_hp_slot_private_alloc(bss_hotplug_slot,
648 pci_bus, device)) { 643 pci_bus, device, name)) {
649 rc = -ENOMEM; 644 rc = -ENOMEM;
650 goto alloc_err; 645 goto alloc_err;
651 } 646 }
652
653 bss_hotplug_slot->ops = &sn_hotplug_slot_ops; 647 bss_hotplug_slot->ops = &sn_hotplug_slot_ops;
654 bss_hotplug_slot->release = &sn_release_slot; 648 bss_hotplug_slot->release = &sn_release_slot;
655 649
656 rc = pci_hp_register(bss_hotplug_slot, pci_bus, device); 650 rc = pci_hp_register(bss_hotplug_slot, pci_bus, device, name);
657 if (rc) 651 if (rc)
658 goto register_err; 652 goto register_err;
659 653
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h
index 8a026f750deb..4d9fed00e1d0 100644
--- a/drivers/pci/hotplug/shpchp.h
+++ b/drivers/pci/hotplug/shpchp.h
@@ -69,15 +69,13 @@ struct slot {
69 u8 state; 69 u8 state;
70 u8 presence_save; 70 u8 presence_save;
71 u8 pwr_save; 71 u8 pwr_save;
72 struct timer_list task_event;
73 u8 hp_slot;
74 struct controller *ctrl; 72 struct controller *ctrl;
75 struct hpc_ops *hpc_ops; 73 struct hpc_ops *hpc_ops;
76 struct hotplug_slot *hotplug_slot; 74 struct hotplug_slot *hotplug_slot;
77 struct list_head slot_list; 75 struct list_head slot_list;
78 char name[SLOT_NAME_SIZE];
79 struct delayed_work work; /* work for button event */ 76 struct delayed_work work; /* work for button event */
80 struct mutex lock; 77 struct mutex lock;
78 u8 hp_slot;
81}; 79};
82 80
83struct event_info { 81struct event_info {
@@ -169,6 +167,11 @@ extern void cleanup_slots(struct controller *ctrl);
169extern void shpchp_queue_pushbutton_work(struct work_struct *work); 167extern void shpchp_queue_pushbutton_work(struct work_struct *work);
170extern int shpc_init( struct controller *ctrl, struct pci_dev *pdev); 168extern int shpc_init( struct controller *ctrl, struct pci_dev *pdev);
171 169
170static inline const char *slot_name(struct slot *slot)
171{
172 return hotplug_slot_name(slot->hotplug_slot);
173}
174
172#ifdef CONFIG_ACPI 175#ifdef CONFIG_ACPI
173#include <linux/pci-acpi.h> 176#include <linux/pci-acpi.h>
174static inline int get_hp_params_from_firmware(struct pci_dev *dev, 177static inline int get_hp_params_from_firmware(struct pci_dev *dev,
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
index cc38615395f1..7af9191df4d6 100644
--- a/drivers/pci/hotplug/shpchp_core.c
+++ b/drivers/pci/hotplug/shpchp_core.c
@@ -89,7 +89,7 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
89{ 89{
90 struct slot *slot = hotplug_slot->private; 90 struct slot *slot = hotplug_slot->private;
91 91
92 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 92 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
93 93
94 kfree(slot->hotplug_slot->info); 94 kfree(slot->hotplug_slot->info);
95 kfree(slot->hotplug_slot); 95 kfree(slot->hotplug_slot);
@@ -101,8 +101,9 @@ static int init_slots(struct controller *ctrl)
101 struct slot *slot; 101 struct slot *slot;
102 struct hotplug_slot *hotplug_slot; 102 struct hotplug_slot *hotplug_slot;
103 struct hotplug_slot_info *info; 103 struct hotplug_slot_info *info;
104 char name[SLOT_NAME_SIZE];
104 int retval = -ENOMEM; 105 int retval = -ENOMEM;
105 int i, len, dup = 1; 106 int i;
106 107
107 for (i = 0; i < ctrl->num_slots; i++) { 108 for (i = 0; i < ctrl->num_slots; i++) {
108 slot = kzalloc(sizeof(*slot), GFP_KERNEL); 109 slot = kzalloc(sizeof(*slot), GFP_KERNEL);
@@ -119,8 +120,6 @@ static int init_slots(struct controller *ctrl)
119 goto error_hpslot; 120 goto error_hpslot;
120 hotplug_slot->info = info; 121 hotplug_slot->info = info;
121 122
122 hotplug_slot->name = slot->name;
123
124 slot->hp_slot = i; 123 slot->hp_slot = i;
125 slot->ctrl = ctrl; 124 slot->ctrl = ctrl;
126 slot->bus = ctrl->pci_dev->subordinate->number; 125 slot->bus = ctrl->pci_dev->subordinate->number;
@@ -133,37 +132,24 @@ static int init_slots(struct controller *ctrl)
133 /* register this slot with the hotplug pci core */ 132 /* register this slot with the hotplug pci core */
134 hotplug_slot->private = slot; 133 hotplug_slot->private = slot;
135 hotplug_slot->release = &release_slot; 134 hotplug_slot->release = &release_slot;
136 snprintf(slot->name, SLOT_NAME_SIZE, "%d", slot->number); 135 snprintf(name, SLOT_NAME_SIZE, "%d", slot->number);
137 hotplug_slot->ops = &shpchp_hotplug_slot_ops; 136 hotplug_slot->ops = &shpchp_hotplug_slot_ops;
138 137
139 get_power_status(hotplug_slot, &info->power_status);
140 get_attention_status(hotplug_slot, &info->attention_status);
141 get_latch_status(hotplug_slot, &info->latch_status);
142 get_adapter_status(hotplug_slot, &info->adapter_status);
143
144 dbg("Registering bus=%x dev=%x hp_slot=%x sun=%x " 138 dbg("Registering bus=%x dev=%x hp_slot=%x sun=%x "
145 "slot_device_offset=%x\n", slot->bus, slot->device, 139 "slot_device_offset=%x\n", slot->bus, slot->device,
146 slot->hp_slot, slot->number, ctrl->slot_device_offset); 140 slot->hp_slot, slot->number, ctrl->slot_device_offset);
147duplicate_name:
148 retval = pci_hp_register(slot->hotplug_slot, 141 retval = pci_hp_register(slot->hotplug_slot,
149 ctrl->pci_dev->subordinate, slot->device); 142 ctrl->pci_dev->subordinate, slot->device, name);
150 if (retval) { 143 if (retval) {
151 /*
152 * If slot N already exists, we'll try to create
153 * slot N-1, N-2 ... N-M, until we overflow.
154 */
155 if (retval == -EEXIST) {
156 len = snprintf(slot->name, SLOT_NAME_SIZE,
157 "%d-%d", slot->number, dup++);
158 if (len < SLOT_NAME_SIZE)
159 goto duplicate_name;
160 else
161 err("duplicate slot name overflow\n");
162 }
163 err("pci_hp_register failed with error %d\n", retval); 144 err("pci_hp_register failed with error %d\n", retval);
164 goto error_info; 145 goto error_info;
165 } 146 }
166 147
148 get_power_status(hotplug_slot, &info->power_status);
149 get_attention_status(hotplug_slot, &info->attention_status);
150 get_latch_status(hotplug_slot, &info->latch_status);
151 get_adapter_status(hotplug_slot, &info->adapter_status);
152
167 list_add(&slot->slot_list, &ctrl->slot_list); 153 list_add(&slot->slot_list, &ctrl->slot_list);
168 } 154 }
169 155
@@ -201,7 +187,7 @@ static int set_attention_status (struct hotplug_slot *hotplug_slot, u8 status)
201{ 187{
202 struct slot *slot = get_slot(hotplug_slot); 188 struct slot *slot = get_slot(hotplug_slot);
203 189
204 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 190 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
205 191
206 hotplug_slot->info->attention_status = status; 192 hotplug_slot->info->attention_status = status;
207 slot->hpc_ops->set_attention_status(slot, status); 193 slot->hpc_ops->set_attention_status(slot, status);
@@ -213,7 +199,7 @@ static int enable_slot (struct hotplug_slot *hotplug_slot)
213{ 199{
214 struct slot *slot = get_slot(hotplug_slot); 200 struct slot *slot = get_slot(hotplug_slot);
215 201
216 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 202 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
217 203
218 return shpchp_sysfs_enable_slot(slot); 204 return shpchp_sysfs_enable_slot(slot);
219} 205}
@@ -222,7 +208,7 @@ static int disable_slot (struct hotplug_slot *hotplug_slot)
222{ 208{
223 struct slot *slot = get_slot(hotplug_slot); 209 struct slot *slot = get_slot(hotplug_slot);
224 210
225 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 211 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
226 212
227 return shpchp_sysfs_disable_slot(slot); 213 return shpchp_sysfs_disable_slot(slot);
228} 214}
@@ -232,7 +218,7 @@ static int get_power_status (struct hotplug_slot *hotplug_slot, u8 *value)
232 struct slot *slot = get_slot(hotplug_slot); 218 struct slot *slot = get_slot(hotplug_slot);
233 int retval; 219 int retval;
234 220
235 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 221 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
236 222
237 retval = slot->hpc_ops->get_power_status(slot, value); 223 retval = slot->hpc_ops->get_power_status(slot, value);
238 if (retval < 0) 224 if (retval < 0)
@@ -246,7 +232,7 @@ static int get_attention_status (struct hotplug_slot *hotplug_slot, u8 *value)
246 struct slot *slot = get_slot(hotplug_slot); 232 struct slot *slot = get_slot(hotplug_slot);
247 int retval; 233 int retval;
248 234
249 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 235 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
250 236
251 retval = slot->hpc_ops->get_attention_status(slot, value); 237 retval = slot->hpc_ops->get_attention_status(slot, value);
252 if (retval < 0) 238 if (retval < 0)
@@ -260,7 +246,7 @@ static int get_latch_status (struct hotplug_slot *hotplug_slot, u8 *value)
260 struct slot *slot = get_slot(hotplug_slot); 246 struct slot *slot = get_slot(hotplug_slot);
261 int retval; 247 int retval;
262 248
263 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 249 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
264 250
265 retval = slot->hpc_ops->get_latch_status(slot, value); 251 retval = slot->hpc_ops->get_latch_status(slot, value);
266 if (retval < 0) 252 if (retval < 0)
@@ -274,7 +260,7 @@ static int get_adapter_status (struct hotplug_slot *hotplug_slot, u8 *value)
274 struct slot *slot = get_slot(hotplug_slot); 260 struct slot *slot = get_slot(hotplug_slot);
275 int retval; 261 int retval;
276 262
277 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 263 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
278 264
279 retval = slot->hpc_ops->get_adapter_status(slot, value); 265 retval = slot->hpc_ops->get_adapter_status(slot, value);
280 if (retval < 0) 266 if (retval < 0)
@@ -289,7 +275,7 @@ static int get_max_bus_speed(struct hotplug_slot *hotplug_slot,
289 struct slot *slot = get_slot(hotplug_slot); 275 struct slot *slot = get_slot(hotplug_slot);
290 int retval; 276 int retval;
291 277
292 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 278 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
293 279
294 retval = slot->hpc_ops->get_max_bus_speed(slot, value); 280 retval = slot->hpc_ops->get_max_bus_speed(slot, value);
295 if (retval < 0) 281 if (retval < 0)
@@ -303,7 +289,7 @@ static int get_cur_bus_speed (struct hotplug_slot *hotplug_slot, enum pci_bus_sp
303 struct slot *slot = get_slot(hotplug_slot); 289 struct slot *slot = get_slot(hotplug_slot);
304 int retval; 290 int retval;
305 291
306 dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); 292 dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
307 293
308 retval = slot->hpc_ops->get_cur_bus_speed(slot, value); 294 retval = slot->hpc_ops->get_cur_bus_speed(slot, value);
309 if (retval < 0) 295 if (retval < 0)
diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c
index dfb53932dfbc..919b1ee44313 100644
--- a/drivers/pci/hotplug/shpchp_ctrl.c
+++ b/drivers/pci/hotplug/shpchp_ctrl.c
@@ -70,7 +70,7 @@ u8 shpchp_handle_attention_button(u8 hp_slot, struct controller *ctrl)
70 /* 70 /*
71 * Button pressed - See if need to TAKE ACTION!!! 71 * Button pressed - See if need to TAKE ACTION!!!
72 */ 72 */
73 info("Button pressed on Slot(%s)\n", p_slot->name); 73 info("Button pressed on Slot(%s)\n", slot_name(p_slot));
74 event_type = INT_BUTTON_PRESS; 74 event_type = INT_BUTTON_PRESS;
75 75
76 queue_interrupt_event(p_slot, event_type); 76 queue_interrupt_event(p_slot, event_type);
@@ -98,7 +98,7 @@ u8 shpchp_handle_switch_change(u8 hp_slot, struct controller *ctrl)
98 /* 98 /*
99 * Switch opened 99 * Switch opened
100 */ 100 */
101 info("Latch open on Slot(%s)\n", p_slot->name); 101 info("Latch open on Slot(%s)\n", slot_name(p_slot));
102 event_type = INT_SWITCH_OPEN; 102 event_type = INT_SWITCH_OPEN;
103 if (p_slot->pwr_save && p_slot->presence_save) { 103 if (p_slot->pwr_save && p_slot->presence_save) {
104 event_type = INT_POWER_FAULT; 104 event_type = INT_POWER_FAULT;
@@ -108,7 +108,7 @@ u8 shpchp_handle_switch_change(u8 hp_slot, struct controller *ctrl)
108 /* 108 /*
109 * Switch closed 109 * Switch closed
110 */ 110 */
111 info("Latch close on Slot(%s)\n", p_slot->name); 111 info("Latch close on Slot(%s)\n", slot_name(p_slot));
112 event_type = INT_SWITCH_CLOSE; 112 event_type = INT_SWITCH_CLOSE;
113 } 113 }
114 114
@@ -135,13 +135,13 @@ u8 shpchp_handle_presence_change(u8 hp_slot, struct controller *ctrl)
135 /* 135 /*
136 * Card Present 136 * Card Present
137 */ 137 */
138 info("Card present on Slot(%s)\n", p_slot->name); 138 info("Card present on Slot(%s)\n", slot_name(p_slot));
139 event_type = INT_PRESENCE_ON; 139 event_type = INT_PRESENCE_ON;
140 } else { 140 } else {
141 /* 141 /*
142 * Not Present 142 * Not Present
143 */ 143 */
144 info("Card not present on Slot(%s)\n", p_slot->name); 144 info("Card not present on Slot(%s)\n", slot_name(p_slot));
145 event_type = INT_PRESENCE_OFF; 145 event_type = INT_PRESENCE_OFF;
146 } 146 }
147 147
@@ -164,14 +164,14 @@ u8 shpchp_handle_power_fault(u8 hp_slot, struct controller *ctrl)
164 /* 164 /*
165 * Power fault Cleared 165 * Power fault Cleared
166 */ 166 */
167 info("Power fault cleared on Slot(%s)\n", p_slot->name); 167 info("Power fault cleared on Slot(%s)\n", slot_name(p_slot));
168 p_slot->status = 0x00; 168 p_slot->status = 0x00;
169 event_type = INT_POWER_FAULT_CLEAR; 169 event_type = INT_POWER_FAULT_CLEAR;
170 } else { 170 } else {
171 /* 171 /*
172 * Power fault 172 * Power fault
173 */ 173 */
174 info("Power fault on Slot(%s)\n", p_slot->name); 174 info("Power fault on Slot(%s)\n", slot_name(p_slot));
175 event_type = INT_POWER_FAULT; 175 event_type = INT_POWER_FAULT;
176 /* set power fault status for this board */ 176 /* set power fault status for this board */
177 p_slot->status = 0xFF; 177 p_slot->status = 0xFF;
@@ -493,11 +493,11 @@ static void handle_button_press_event(struct slot *p_slot)
493 if (getstatus) { 493 if (getstatus) {
494 p_slot->state = BLINKINGOFF_STATE; 494 p_slot->state = BLINKINGOFF_STATE;
495 info("PCI slot #%s - powering off due to button " 495 info("PCI slot #%s - powering off due to button "
496 "press.\n", p_slot->name); 496 "press.\n", slot_name(p_slot));
497 } else { 497 } else {
498 p_slot->state = BLINKINGON_STATE; 498 p_slot->state = BLINKINGON_STATE;
499 info("PCI slot #%s - powering on due to button " 499 info("PCI slot #%s - powering on due to button "
500 "press.\n", p_slot->name); 500 "press.\n", slot_name(p_slot));
501 } 501 }
502 /* blink green LED and turn off amber */ 502 /* blink green LED and turn off amber */
503 p_slot->hpc_ops->green_led_blink(p_slot); 503 p_slot->hpc_ops->green_led_blink(p_slot);
@@ -512,7 +512,7 @@ static void handle_button_press_event(struct slot *p_slot)
512 * press the attention again before the 5 sec. limit 512 * press the attention again before the 5 sec. limit
513 * expires to cancel hot-add or hot-remove 513 * expires to cancel hot-add or hot-remove
514 */ 514 */
515 info("Button cancel on Slot(%s)\n", p_slot->name); 515 info("Button cancel on Slot(%s)\n", slot_name(p_slot));
516 dbg("%s: button cancel\n", __func__); 516 dbg("%s: button cancel\n", __func__);
517 cancel_delayed_work(&p_slot->work); 517 cancel_delayed_work(&p_slot->work);
518 if (p_slot->state == BLINKINGOFF_STATE) 518 if (p_slot->state == BLINKINGOFF_STATE)
@@ -521,7 +521,7 @@ static void handle_button_press_event(struct slot *p_slot)
521 p_slot->hpc_ops->green_led_off(p_slot); 521 p_slot->hpc_ops->green_led_off(p_slot);
522 p_slot->hpc_ops->set_attention_status(p_slot, 0); 522 p_slot->hpc_ops->set_attention_status(p_slot, 0);
523 info("PCI slot #%s - action canceled due to button press\n", 523 info("PCI slot #%s - action canceled due to button press\n",
524 p_slot->name); 524 slot_name(p_slot));
525 p_slot->state = STATIC_STATE; 525 p_slot->state = STATIC_STATE;
526 break; 526 break;
527 case POWEROFF_STATE: 527 case POWEROFF_STATE:
@@ -531,7 +531,7 @@ static void handle_button_press_event(struct slot *p_slot)
531 * this means that the previous attention button action 531 * this means that the previous attention button action
532 * to hot-add or hot-remove is undergoing 532 * to hot-add or hot-remove is undergoing
533 */ 533 */
534 info("Button ignore on Slot(%s)\n", p_slot->name); 534 info("Button ignore on Slot(%s)\n", slot_name(p_slot));
535 update_slot_info(p_slot); 535 update_slot_info(p_slot);
536 break; 536 break;
537 default: 537 default:
@@ -574,17 +574,17 @@ static int shpchp_enable_slot (struct slot *p_slot)
574 mutex_lock(&p_slot->ctrl->crit_sect); 574 mutex_lock(&p_slot->ctrl->crit_sect);
575 rc = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus); 575 rc = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus);
576 if (rc || !getstatus) { 576 if (rc || !getstatus) {
577 info("No adapter on slot(%s)\n", p_slot->name); 577 info("No adapter on slot(%s)\n", slot_name(p_slot));
578 goto out; 578 goto out;
579 } 579 }
580 rc = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); 580 rc = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
581 if (rc || getstatus) { 581 if (rc || getstatus) {
582 info("Latch open on slot(%s)\n", p_slot->name); 582 info("Latch open on slot(%s)\n", slot_name(p_slot));
583 goto out; 583 goto out;
584 } 584 }
585 rc = p_slot->hpc_ops->get_power_status(p_slot, &getstatus); 585 rc = p_slot->hpc_ops->get_power_status(p_slot, &getstatus);
586 if (rc || getstatus) { 586 if (rc || getstatus) {
587 info("Already enabled on slot(%s)\n", p_slot->name); 587 info("Already enabled on slot(%s)\n", slot_name(p_slot));
588 goto out; 588 goto out;
589 } 589 }
590 590
@@ -633,17 +633,17 @@ static int shpchp_disable_slot (struct slot *p_slot)
633 633
634 rc = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus); 634 rc = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus);
635 if (rc || !getstatus) { 635 if (rc || !getstatus) {
636 info("No adapter on slot(%s)\n", p_slot->name); 636 info("No adapter on slot(%s)\n", slot_name(p_slot));
637 goto out; 637 goto out;
638 } 638 }
639 rc = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); 639 rc = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
640 if (rc || getstatus) { 640 if (rc || getstatus) {
641 info("Latch open on slot(%s)\n", p_slot->name); 641 info("Latch open on slot(%s)\n", slot_name(p_slot));
642 goto out; 642 goto out;
643 } 643 }
644 rc = p_slot->hpc_ops->get_power_status(p_slot, &getstatus); 644 rc = p_slot->hpc_ops->get_power_status(p_slot, &getstatus);
645 if (rc || !getstatus) { 645 if (rc || !getstatus) {
646 info("Already disabled slot(%s)\n", p_slot->name); 646 info("Already disabled slot(%s)\n", slot_name(p_slot));
647 goto out; 647 goto out;
648 } 648 }
649 649
@@ -671,14 +671,14 @@ int shpchp_sysfs_enable_slot(struct slot *p_slot)
671 break; 671 break;
672 case POWERON_STATE: 672 case POWERON_STATE:
673 info("Slot %s is already in powering on state\n", 673 info("Slot %s is already in powering on state\n",
674 p_slot->name); 674 slot_name(p_slot));
675 break; 675 break;
676 case BLINKINGOFF_STATE: 676 case BLINKINGOFF_STATE:
677 case POWEROFF_STATE: 677 case POWEROFF_STATE:
678 info("Already enabled on slot %s\n", p_slot->name); 678 info("Already enabled on slot %s\n", slot_name(p_slot));
679 break; 679 break;
680 default: 680 default:
681 err("Not a valid state on slot %s\n", p_slot->name); 681 err("Not a valid state on slot %s\n", slot_name(p_slot));
682 break; 682 break;
683 } 683 }
684 mutex_unlock(&p_slot->lock); 684 mutex_unlock(&p_slot->lock);
@@ -703,14 +703,14 @@ int shpchp_sysfs_disable_slot(struct slot *p_slot)
703 break; 703 break;
704 case POWEROFF_STATE: 704 case POWEROFF_STATE:
705 info("Slot %s is already in powering off state\n", 705 info("Slot %s is already in powering off state\n",
706 p_slot->name); 706 slot_name(p_slot));
707 break; 707 break;
708 case BLINKINGON_STATE: 708 case BLINKINGON_STATE:
709 case POWERON_STATE: 709 case POWERON_STATE:
710 info("Already disabled on slot %s\n", p_slot->name); 710 info("Already disabled on slot %s\n", slot_name(p_slot));
711 break; 711 break;
712 default: 712 default:
713 err("Not a valid state on slot %s\n", p_slot->name); 713 err("Not a valid state on slot %s\n", slot_name(p_slot));
714 break; 714 break;
715 } 715 }
716 mutex_unlock(&p_slot->lock); 716 mutex_unlock(&p_slot->lock);
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 8b51e10b7783..a2692724b68f 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -18,6 +18,7 @@
18 * Author: Ashok Raj <ashok.raj@intel.com> 18 * Author: Ashok Raj <ashok.raj@intel.com>
19 * Author: Shaohua Li <shaohua.li@intel.com> 19 * Author: Shaohua Li <shaohua.li@intel.com>
20 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> 20 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
21 * Author: Fenghua Yu <fenghua.yu@intel.com>
21 */ 22 */
22 23
23#include <linux/init.h> 24#include <linux/init.h>
@@ -35,11 +36,13 @@
35#include <linux/timer.h> 36#include <linux/timer.h>
36#include <linux/iova.h> 37#include <linux/iova.h>
37#include <linux/intel-iommu.h> 38#include <linux/intel-iommu.h>
38#include <asm/proto.h> /* force_iommu in this header in x86-64*/
39#include <asm/cacheflush.h> 39#include <asm/cacheflush.h>
40#include <asm/iommu.h> 40#include <asm/iommu.h>
41#include "pci.h" 41#include "pci.h"
42 42
43#define ROOT_SIZE VTD_PAGE_SIZE
44#define CONTEXT_SIZE VTD_PAGE_SIZE
45
43#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY) 46#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
44#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA) 47#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
45 48
@@ -199,7 +202,7 @@ static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
199 spin_unlock_irqrestore(&iommu->lock, flags); 202 spin_unlock_irqrestore(&iommu->lock, flags);
200 return NULL; 203 return NULL;
201 } 204 }
202 __iommu_flush_cache(iommu, (void *)context, PAGE_SIZE_4K); 205 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
203 phy_addr = virt_to_phys((void *)context); 206 phy_addr = virt_to_phys((void *)context);
204 set_root_value(root, phy_addr); 207 set_root_value(root, phy_addr);
205 set_root_present(root); 208 set_root_present(root);
@@ -345,7 +348,7 @@ static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
345 return NULL; 348 return NULL;
346 } 349 }
347 __iommu_flush_cache(domain->iommu, tmp_page, 350 __iommu_flush_cache(domain->iommu, tmp_page,
348 PAGE_SIZE_4K); 351 PAGE_SIZE);
349 dma_set_pte_addr(*pte, virt_to_phys(tmp_page)); 352 dma_set_pte_addr(*pte, virt_to_phys(tmp_page));
350 /* 353 /*
351 * high level table always sets r/w, last level page 354 * high level table always sets r/w, last level page
@@ -408,13 +411,13 @@ static void dma_pte_clear_range(struct dmar_domain *domain, u64 start, u64 end)
408 start &= (((u64)1) << addr_width) - 1; 411 start &= (((u64)1) << addr_width) - 1;
409 end &= (((u64)1) << addr_width) - 1; 412 end &= (((u64)1) << addr_width) - 1;
410 /* in case it's partial page */ 413 /* in case it's partial page */
411 start = PAGE_ALIGN_4K(start); 414 start = PAGE_ALIGN(start);
412 end &= PAGE_MASK_4K; 415 end &= PAGE_MASK;
413 416
414 /* we don't need lock here, nobody else touches the iova range */ 417 /* we don't need lock here, nobody else touches the iova range */
415 while (start < end) { 418 while (start < end) {
416 dma_pte_clear_one(domain, start); 419 dma_pte_clear_one(domain, start);
417 start += PAGE_SIZE_4K; 420 start += VTD_PAGE_SIZE;
418 } 421 }
419} 422}
420 423
@@ -468,7 +471,7 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
468 if (!root) 471 if (!root)
469 return -ENOMEM; 472 return -ENOMEM;
470 473
471 __iommu_flush_cache(iommu, root, PAGE_SIZE_4K); 474 __iommu_flush_cache(iommu, root, ROOT_SIZE);
472 475
473 spin_lock_irqsave(&iommu->lock, flags); 476 spin_lock_irqsave(&iommu->lock, flags);
474 iommu->root_entry = root; 477 iommu->root_entry = root;
@@ -567,27 +570,6 @@ static int __iommu_flush_context(struct intel_iommu *iommu,
567 return 0; 570 return 0;
568} 571}
569 572
570static int inline iommu_flush_context_global(struct intel_iommu *iommu,
571 int non_present_entry_flush)
572{
573 return __iommu_flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL,
574 non_present_entry_flush);
575}
576
577static int inline iommu_flush_context_domain(struct intel_iommu *iommu, u16 did,
578 int non_present_entry_flush)
579{
580 return __iommu_flush_context(iommu, did, 0, 0, DMA_CCMD_DOMAIN_INVL,
581 non_present_entry_flush);
582}
583
584static int inline iommu_flush_context_device(struct intel_iommu *iommu,
585 u16 did, u16 source_id, u8 function_mask, int non_present_entry_flush)
586{
587 return __iommu_flush_context(iommu, did, source_id, function_mask,
588 DMA_CCMD_DEVICE_INVL, non_present_entry_flush);
589}
590
591/* return value determine if we need a write buffer flush */ 573/* return value determine if we need a write buffer flush */
592static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did, 574static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
593 u64 addr, unsigned int size_order, u64 type, 575 u64 addr, unsigned int size_order, u64 type,
@@ -655,37 +637,25 @@ static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
655 printk(KERN_ERR"IOMMU: flush IOTLB failed\n"); 637 printk(KERN_ERR"IOMMU: flush IOTLB failed\n");
656 if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type)) 638 if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
657 pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n", 639 pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
658 DMA_TLB_IIRG(type), DMA_TLB_IAIG(val)); 640 (unsigned long long)DMA_TLB_IIRG(type),
641 (unsigned long long)DMA_TLB_IAIG(val));
659 /* flush iotlb entry will implicitly flush write buffer */ 642 /* flush iotlb entry will implicitly flush write buffer */
660 return 0; 643 return 0;
661} 644}
662 645
663static int inline iommu_flush_iotlb_global(struct intel_iommu *iommu,
664 int non_present_entry_flush)
665{
666 return __iommu_flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH,
667 non_present_entry_flush);
668}
669
670static int inline iommu_flush_iotlb_dsi(struct intel_iommu *iommu, u16 did,
671 int non_present_entry_flush)
672{
673 return __iommu_flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH,
674 non_present_entry_flush);
675}
676
677static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did, 646static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
678 u64 addr, unsigned int pages, int non_present_entry_flush) 647 u64 addr, unsigned int pages, int non_present_entry_flush)
679{ 648{
680 unsigned int mask; 649 unsigned int mask;
681 650
682 BUG_ON(addr & (~PAGE_MASK_4K)); 651 BUG_ON(addr & (~VTD_PAGE_MASK));
683 BUG_ON(pages == 0); 652 BUG_ON(pages == 0);
684 653
685 /* Fallback to domain selective flush if no PSI support */ 654 /* Fallback to domain selective flush if no PSI support */
686 if (!cap_pgsel_inv(iommu->cap)) 655 if (!cap_pgsel_inv(iommu->cap))
687 return iommu_flush_iotlb_dsi(iommu, did, 656 return iommu->flush.flush_iotlb(iommu, did, 0, 0,
688 non_present_entry_flush); 657 DMA_TLB_DSI_FLUSH,
658 non_present_entry_flush);
689 659
690 /* 660 /*
691 * PSI requires page size to be 2 ^ x, and the base address is naturally 661 * PSI requires page size to be 2 ^ x, and the base address is naturally
@@ -694,11 +664,12 @@ static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
694 mask = ilog2(__roundup_pow_of_two(pages)); 664 mask = ilog2(__roundup_pow_of_two(pages));
695 /* Fallback to domain selective flush if size is too big */ 665 /* Fallback to domain selective flush if size is too big */
696 if (mask > cap_max_amask_val(iommu->cap)) 666 if (mask > cap_max_amask_val(iommu->cap))
697 return iommu_flush_iotlb_dsi(iommu, did, 667 return iommu->flush.flush_iotlb(iommu, did, 0, 0,
698 non_present_entry_flush); 668 DMA_TLB_DSI_FLUSH, non_present_entry_flush);
699 669
700 return __iommu_flush_iotlb(iommu, did, addr, mask, 670 return iommu->flush.flush_iotlb(iommu, did, addr, mask,
701 DMA_TLB_PSI_FLUSH, non_present_entry_flush); 671 DMA_TLB_PSI_FLUSH,
672 non_present_entry_flush);
702} 673}
703 674
704static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu) 675static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
@@ -831,7 +802,7 @@ void dmar_msi_read(int irq, struct msi_msg *msg)
831} 802}
832 803
833static int iommu_page_fault_do_one(struct intel_iommu *iommu, int type, 804static int iommu_page_fault_do_one(struct intel_iommu *iommu, int type,
834 u8 fault_reason, u16 source_id, u64 addr) 805 u8 fault_reason, u16 source_id, unsigned long long addr)
835{ 806{
836 const char *reason; 807 const char *reason;
837 808
@@ -1084,9 +1055,9 @@ static void dmar_init_reserved_ranges(void)
1084 if (!r->flags || !(r->flags & IORESOURCE_MEM)) 1055 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1085 continue; 1056 continue;
1086 addr = r->start; 1057 addr = r->start;
1087 addr &= PAGE_MASK_4K; 1058 addr &= PAGE_MASK;
1088 size = r->end - addr; 1059 size = r->end - addr;
1089 size = PAGE_ALIGN_4K(size); 1060 size = PAGE_ALIGN(size);
1090 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(addr), 1061 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(addr),
1091 IOVA_PFN(size + addr) - 1); 1062 IOVA_PFN(size + addr) - 1);
1092 if (!iova) 1063 if (!iova)
@@ -1148,7 +1119,7 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
1148 domain->pgd = (struct dma_pte *)alloc_pgtable_page(); 1119 domain->pgd = (struct dma_pte *)alloc_pgtable_page();
1149 if (!domain->pgd) 1120 if (!domain->pgd)
1150 return -ENOMEM; 1121 return -ENOMEM;
1151 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE_4K); 1122 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
1152 return 0; 1123 return 0;
1153} 1124}
1154 1125
@@ -1164,7 +1135,7 @@ static void domain_exit(struct dmar_domain *domain)
1164 /* destroy iovas */ 1135 /* destroy iovas */
1165 put_iova_domain(&domain->iovad); 1136 put_iova_domain(&domain->iovad);
1166 end = DOMAIN_MAX_ADDR(domain->gaw); 1137 end = DOMAIN_MAX_ADDR(domain->gaw);
1167 end = end & (~PAGE_MASK_4K); 1138 end = end & (~PAGE_MASK);
1168 1139
1169 /* clear ptes */ 1140 /* clear ptes */
1170 dma_pte_clear_range(domain, 0, end); 1141 dma_pte_clear_range(domain, 0, end);
@@ -1204,11 +1175,13 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
1204 __iommu_flush_cache(iommu, context, sizeof(*context)); 1175 __iommu_flush_cache(iommu, context, sizeof(*context));
1205 1176
1206 /* it's a non-present to present mapping */ 1177 /* it's a non-present to present mapping */
1207 if (iommu_flush_context_device(iommu, domain->id, 1178 if (iommu->flush.flush_context(iommu, domain->id,
1208 (((u16)bus) << 8) | devfn, DMA_CCMD_MASK_NOBIT, 1)) 1179 (((u16)bus) << 8) | devfn, DMA_CCMD_MASK_NOBIT,
1180 DMA_CCMD_DEVICE_INVL, 1))
1209 iommu_flush_write_buffer(iommu); 1181 iommu_flush_write_buffer(iommu);
1210 else 1182 else
1211 iommu_flush_iotlb_dsi(iommu, 0, 0); 1183 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH, 0);
1184
1212 spin_unlock_irqrestore(&iommu->lock, flags); 1185 spin_unlock_irqrestore(&iommu->lock, flags);
1213 return 0; 1186 return 0;
1214} 1187}
@@ -1283,22 +1256,25 @@ domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
1283 u64 start_pfn, end_pfn; 1256 u64 start_pfn, end_pfn;
1284 struct dma_pte *pte; 1257 struct dma_pte *pte;
1285 int index; 1258 int index;
1259 int addr_width = agaw_to_width(domain->agaw);
1260
1261 hpa &= (((u64)1) << addr_width) - 1;
1286 1262
1287 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0) 1263 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
1288 return -EINVAL; 1264 return -EINVAL;
1289 iova &= PAGE_MASK_4K; 1265 iova &= PAGE_MASK;
1290 start_pfn = ((u64)hpa) >> PAGE_SHIFT_4K; 1266 start_pfn = ((u64)hpa) >> VTD_PAGE_SHIFT;
1291 end_pfn = (PAGE_ALIGN_4K(((u64)hpa) + size)) >> PAGE_SHIFT_4K; 1267 end_pfn = (VTD_PAGE_ALIGN(((u64)hpa) + size)) >> VTD_PAGE_SHIFT;
1292 index = 0; 1268 index = 0;
1293 while (start_pfn < end_pfn) { 1269 while (start_pfn < end_pfn) {
1294 pte = addr_to_dma_pte(domain, iova + PAGE_SIZE_4K * index); 1270 pte = addr_to_dma_pte(domain, iova + VTD_PAGE_SIZE * index);
1295 if (!pte) 1271 if (!pte)
1296 return -ENOMEM; 1272 return -ENOMEM;
1297 /* We don't need lock here, nobody else 1273 /* We don't need lock here, nobody else
1298 * touches the iova range 1274 * touches the iova range
1299 */ 1275 */
1300 BUG_ON(dma_pte_addr(*pte)); 1276 BUG_ON(dma_pte_addr(*pte));
1301 dma_set_pte_addr(*pte, start_pfn << PAGE_SHIFT_4K); 1277 dma_set_pte_addr(*pte, start_pfn << VTD_PAGE_SHIFT);
1302 dma_set_pte_prot(*pte, prot); 1278 dma_set_pte_prot(*pte, prot);
1303 __iommu_flush_cache(domain->iommu, pte, sizeof(*pte)); 1279 __iommu_flush_cache(domain->iommu, pte, sizeof(*pte));
1304 start_pfn++; 1280 start_pfn++;
@@ -1310,8 +1286,10 @@ domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
1310static void detach_domain_for_dev(struct dmar_domain *domain, u8 bus, u8 devfn) 1286static void detach_domain_for_dev(struct dmar_domain *domain, u8 bus, u8 devfn)
1311{ 1287{
1312 clear_context_table(domain->iommu, bus, devfn); 1288 clear_context_table(domain->iommu, bus, devfn);
1313 iommu_flush_context_global(domain->iommu, 0); 1289 domain->iommu->flush.flush_context(domain->iommu, 0, 0, 0,
1314 iommu_flush_iotlb_global(domain->iommu, 0); 1290 DMA_CCMD_GLOBAL_INVL, 0);
1291 domain->iommu->flush.flush_iotlb(domain->iommu, 0, 0, 0,
1292 DMA_TLB_GLOBAL_FLUSH, 0);
1315} 1293}
1316 1294
1317static void domain_remove_dev_info(struct dmar_domain *domain) 1295static void domain_remove_dev_info(struct dmar_domain *domain)
@@ -1474,11 +1452,13 @@ error:
1474 return find_domain(pdev); 1452 return find_domain(pdev);
1475} 1453}
1476 1454
1477static int iommu_prepare_identity_map(struct pci_dev *pdev, u64 start, u64 end) 1455static int iommu_prepare_identity_map(struct pci_dev *pdev,
1456 unsigned long long start,
1457 unsigned long long end)
1478{ 1458{
1479 struct dmar_domain *domain; 1459 struct dmar_domain *domain;
1480 unsigned long size; 1460 unsigned long size;
1481 u64 base; 1461 unsigned long long base;
1482 int ret; 1462 int ret;
1483 1463
1484 printk(KERN_INFO 1464 printk(KERN_INFO
@@ -1490,9 +1470,9 @@ static int iommu_prepare_identity_map(struct pci_dev *pdev, u64 start, u64 end)
1490 return -ENOMEM; 1470 return -ENOMEM;
1491 1471
1492 /* The address might not be aligned */ 1472 /* The address might not be aligned */
1493 base = start & PAGE_MASK_4K; 1473 base = start & PAGE_MASK;
1494 size = end - base; 1474 size = end - base;
1495 size = PAGE_ALIGN_4K(size); 1475 size = PAGE_ALIGN(size);
1496 if (!reserve_iova(&domain->iovad, IOVA_PFN(base), 1476 if (!reserve_iova(&domain->iovad, IOVA_PFN(base),
1497 IOVA_PFN(base + size) - 1)) { 1477 IOVA_PFN(base + size) - 1)) {
1498 printk(KERN_ERR "IOMMU: reserve iova failed\n"); 1478 printk(KERN_ERR "IOMMU: reserve iova failed\n");
@@ -1662,6 +1642,28 @@ int __init init_dmars(void)
1662 } 1642 }
1663 } 1643 }
1664 1644
1645 for_each_drhd_unit(drhd) {
1646 if (drhd->ignored)
1647 continue;
1648
1649 iommu = drhd->iommu;
1650 if (dmar_enable_qi(iommu)) {
1651 /*
1652 * Queued Invalidate not enabled, use Register Based
1653 * Invalidate
1654 */
1655 iommu->flush.flush_context = __iommu_flush_context;
1656 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
1657 printk(KERN_INFO "IOMMU 0x%Lx: using Register based "
1658 "invalidation\n", drhd->reg_base_addr);
1659 } else {
1660 iommu->flush.flush_context = qi_flush_context;
1661 iommu->flush.flush_iotlb = qi_flush_iotlb;
1662 printk(KERN_INFO "IOMMU 0x%Lx: using Queued "
1663 "invalidation\n", drhd->reg_base_addr);
1664 }
1665 }
1666
1665 /* 1667 /*
1666 * For each rmrr 1668 * For each rmrr
1667 * for each dev attached to rmrr 1669 * for each dev attached to rmrr
@@ -1714,9 +1716,10 @@ int __init init_dmars(void)
1714 1716
1715 iommu_set_root_entry(iommu); 1717 iommu_set_root_entry(iommu);
1716 1718
1717 iommu_flush_context_global(iommu, 0); 1719 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL,
1718 iommu_flush_iotlb_global(iommu, 0); 1720 0);
1719 1721 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH,
1722 0);
1720 iommu_disable_protect_mem_regions(iommu); 1723 iommu_disable_protect_mem_regions(iommu);
1721 1724
1722 ret = iommu_enable_translation(iommu); 1725 ret = iommu_enable_translation(iommu);
@@ -1738,8 +1741,8 @@ error:
1738static inline u64 aligned_size(u64 host_addr, size_t size) 1741static inline u64 aligned_size(u64 host_addr, size_t size)
1739{ 1742{
1740 u64 addr; 1743 u64 addr;
1741 addr = (host_addr & (~PAGE_MASK_4K)) + size; 1744 addr = (host_addr & (~PAGE_MASK)) + size;
1742 return PAGE_ALIGN_4K(addr); 1745 return PAGE_ALIGN(addr);
1743} 1746}
1744 1747
1745struct iova * 1748struct iova *
@@ -1753,20 +1756,20 @@ iommu_alloc_iova(struct dmar_domain *domain, size_t size, u64 end)
1753 return NULL; 1756 return NULL;
1754 1757
1755 piova = alloc_iova(&domain->iovad, 1758 piova = alloc_iova(&domain->iovad,
1756 size >> PAGE_SHIFT_4K, IOVA_PFN(end), 1); 1759 size >> PAGE_SHIFT, IOVA_PFN(end), 1);
1757 return piova; 1760 return piova;
1758} 1761}
1759 1762
1760static struct iova * 1763static struct iova *
1761__intel_alloc_iova(struct device *dev, struct dmar_domain *domain, 1764__intel_alloc_iova(struct device *dev, struct dmar_domain *domain,
1762 size_t size) 1765 size_t size, u64 dma_mask)
1763{ 1766{
1764 struct pci_dev *pdev = to_pci_dev(dev); 1767 struct pci_dev *pdev = to_pci_dev(dev);
1765 struct iova *iova = NULL; 1768 struct iova *iova = NULL;
1766 1769
1767 if ((pdev->dma_mask <= DMA_32BIT_MASK) || (dmar_forcedac)) { 1770 if (dma_mask <= DMA_32BIT_MASK || dmar_forcedac)
1768 iova = iommu_alloc_iova(domain, size, pdev->dma_mask); 1771 iova = iommu_alloc_iova(domain, size, dma_mask);
1769 } else { 1772 else {
1770 /* 1773 /*
1771 * First try to allocate an io virtual address in 1774 * First try to allocate an io virtual address in
1772 * DMA_32BIT_MASK and if that fails then try allocating 1775 * DMA_32BIT_MASK and if that fails then try allocating
@@ -1774,7 +1777,7 @@ __intel_alloc_iova(struct device *dev, struct dmar_domain *domain,
1774 */ 1777 */
1775 iova = iommu_alloc_iova(domain, size, DMA_32BIT_MASK); 1778 iova = iommu_alloc_iova(domain, size, DMA_32BIT_MASK);
1776 if (!iova) 1779 if (!iova)
1777 iova = iommu_alloc_iova(domain, size, pdev->dma_mask); 1780 iova = iommu_alloc_iova(domain, size, dma_mask);
1778 } 1781 }
1779 1782
1780 if (!iova) { 1783 if (!iova) {
@@ -1813,12 +1816,12 @@ get_valid_domain_for_dev(struct pci_dev *pdev)
1813 return domain; 1816 return domain;
1814} 1817}
1815 1818
1816static dma_addr_t 1819static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
1817intel_map_single(struct device *hwdev, phys_addr_t paddr, size_t size, int dir) 1820 size_t size, int dir, u64 dma_mask)
1818{ 1821{
1819 struct pci_dev *pdev = to_pci_dev(hwdev); 1822 struct pci_dev *pdev = to_pci_dev(hwdev);
1820 struct dmar_domain *domain; 1823 struct dmar_domain *domain;
1821 unsigned long start_paddr; 1824 phys_addr_t start_paddr;
1822 struct iova *iova; 1825 struct iova *iova;
1823 int prot = 0; 1826 int prot = 0;
1824 int ret; 1827 int ret;
@@ -1833,11 +1836,11 @@ intel_map_single(struct device *hwdev, phys_addr_t paddr, size_t size, int dir)
1833 1836
1834 size = aligned_size((u64)paddr, size); 1837 size = aligned_size((u64)paddr, size);
1835 1838
1836 iova = __intel_alloc_iova(hwdev, domain, size); 1839 iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
1837 if (!iova) 1840 if (!iova)
1838 goto error; 1841 goto error;
1839 1842
1840 start_paddr = iova->pfn_lo << PAGE_SHIFT_4K; 1843 start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
1841 1844
1842 /* 1845 /*
1843 * Check if DMAR supports zero-length reads on write only 1846 * Check if DMAR supports zero-length reads on write only
@@ -1855,30 +1858,33 @@ intel_map_single(struct device *hwdev, phys_addr_t paddr, size_t size, int dir)
1855 * is not a big problem 1858 * is not a big problem
1856 */ 1859 */
1857 ret = domain_page_mapping(domain, start_paddr, 1860 ret = domain_page_mapping(domain, start_paddr,
1858 ((u64)paddr) & PAGE_MASK_4K, size, prot); 1861 ((u64)paddr) & PAGE_MASK, size, prot);
1859 if (ret) 1862 if (ret)
1860 goto error; 1863 goto error;
1861 1864
1862 pr_debug("Device %s request: %lx@%llx mapping: %lx@%llx, dir %d\n",
1863 pci_name(pdev), size, (u64)paddr,
1864 size, (u64)start_paddr, dir);
1865
1866 /* it's a non-present to present mapping */ 1865 /* it's a non-present to present mapping */
1867 ret = iommu_flush_iotlb_psi(domain->iommu, domain->id, 1866 ret = iommu_flush_iotlb_psi(domain->iommu, domain->id,
1868 start_paddr, size >> PAGE_SHIFT_4K, 1); 1867 start_paddr, size >> VTD_PAGE_SHIFT, 1);
1869 if (ret) 1868 if (ret)
1870 iommu_flush_write_buffer(domain->iommu); 1869 iommu_flush_write_buffer(domain->iommu);
1871 1870
1872 return (start_paddr + ((u64)paddr & (~PAGE_MASK_4K))); 1871 return start_paddr + ((u64)paddr & (~PAGE_MASK));
1873 1872
1874error: 1873error:
1875 if (iova) 1874 if (iova)
1876 __free_iova(&domain->iovad, iova); 1875 __free_iova(&domain->iovad, iova);
1877 printk(KERN_ERR"Device %s request: %lx@%llx dir %d --- failed\n", 1876 printk(KERN_ERR"Device %s request: %lx@%llx dir %d --- failed\n",
1878 pci_name(pdev), size, (u64)paddr, dir); 1877 pci_name(pdev), size, (unsigned long long)paddr, dir);
1879 return 0; 1878 return 0;
1880} 1879}
1881 1880
1881dma_addr_t intel_map_single(struct device *hwdev, phys_addr_t paddr,
1882 size_t size, int dir)
1883{
1884 return __intel_map_single(hwdev, paddr, size, dir,
1885 to_pci_dev(hwdev)->dma_mask);
1886}
1887
1882static void flush_unmaps(void) 1888static void flush_unmaps(void)
1883{ 1889{
1884 int i, j; 1890 int i, j;
@@ -1891,7 +1897,8 @@ static void flush_unmaps(void)
1891 struct intel_iommu *iommu = 1897 struct intel_iommu *iommu =
1892 deferred_flush[i].domain[0]->iommu; 1898 deferred_flush[i].domain[0]->iommu;
1893 1899
1894 iommu_flush_iotlb_global(iommu, 0); 1900 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
1901 DMA_TLB_GLOBAL_FLUSH, 0);
1895 for (j = 0; j < deferred_flush[i].next; j++) { 1902 for (j = 0; j < deferred_flush[i].next; j++) {
1896 __free_iova(&deferred_flush[i].domain[j]->iovad, 1903 __free_iova(&deferred_flush[i].domain[j]->iovad,
1897 deferred_flush[i].iova[j]); 1904 deferred_flush[i].iova[j]);
@@ -1936,8 +1943,8 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova)
1936 spin_unlock_irqrestore(&async_umap_flush_lock, flags); 1943 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
1937} 1944}
1938 1945
1939static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, 1946void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
1940 size_t size, int dir) 1947 int dir)
1941{ 1948{
1942 struct pci_dev *pdev = to_pci_dev(dev); 1949 struct pci_dev *pdev = to_pci_dev(dev);
1943 struct dmar_domain *domain; 1950 struct dmar_domain *domain;
@@ -1953,11 +1960,11 @@ static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr,
1953 if (!iova) 1960 if (!iova)
1954 return; 1961 return;
1955 1962
1956 start_addr = iova->pfn_lo << PAGE_SHIFT_4K; 1963 start_addr = iova->pfn_lo << PAGE_SHIFT;
1957 size = aligned_size((u64)dev_addr, size); 1964 size = aligned_size((u64)dev_addr, size);
1958 1965
1959 pr_debug("Device %s unmapping: %lx@%llx\n", 1966 pr_debug("Device %s unmapping: %lx@%llx\n",
1960 pci_name(pdev), size, (u64)start_addr); 1967 pci_name(pdev), size, (unsigned long long)start_addr);
1961 1968
1962 /* clear the whole page */ 1969 /* clear the whole page */
1963 dma_pte_clear_range(domain, start_addr, start_addr + size); 1970 dma_pte_clear_range(domain, start_addr, start_addr + size);
@@ -1965,7 +1972,7 @@ static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr,
1965 dma_pte_free_pagetable(domain, start_addr, start_addr + size); 1972 dma_pte_free_pagetable(domain, start_addr, start_addr + size);
1966 if (intel_iommu_strict) { 1973 if (intel_iommu_strict) {
1967 if (iommu_flush_iotlb_psi(domain->iommu, 1974 if (iommu_flush_iotlb_psi(domain->iommu,
1968 domain->id, start_addr, size >> PAGE_SHIFT_4K, 0)) 1975 domain->id, start_addr, size >> VTD_PAGE_SHIFT, 0))
1969 iommu_flush_write_buffer(domain->iommu); 1976 iommu_flush_write_buffer(domain->iommu);
1970 /* free iova */ 1977 /* free iova */
1971 __free_iova(&domain->iovad, iova); 1978 __free_iova(&domain->iovad, iova);
@@ -1978,13 +1985,13 @@ static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr,
1978 } 1985 }
1979} 1986}
1980 1987
1981static void * intel_alloc_coherent(struct device *hwdev, size_t size, 1988void *intel_alloc_coherent(struct device *hwdev, size_t size,
1982 dma_addr_t *dma_handle, gfp_t flags) 1989 dma_addr_t *dma_handle, gfp_t flags)
1983{ 1990{
1984 void *vaddr; 1991 void *vaddr;
1985 int order; 1992 int order;
1986 1993
1987 size = PAGE_ALIGN_4K(size); 1994 size = PAGE_ALIGN(size);
1988 order = get_order(size); 1995 order = get_order(size);
1989 flags &= ~(GFP_DMA | GFP_DMA32); 1996 flags &= ~(GFP_DMA | GFP_DMA32);
1990 1997
@@ -1993,19 +2000,21 @@ static void * intel_alloc_coherent(struct device *hwdev, size_t size,
1993 return NULL; 2000 return NULL;
1994 memset(vaddr, 0, size); 2001 memset(vaddr, 0, size);
1995 2002
1996 *dma_handle = intel_map_single(hwdev, virt_to_bus(vaddr), size, DMA_BIDIRECTIONAL); 2003 *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
2004 DMA_BIDIRECTIONAL,
2005 hwdev->coherent_dma_mask);
1997 if (*dma_handle) 2006 if (*dma_handle)
1998 return vaddr; 2007 return vaddr;
1999 free_pages((unsigned long)vaddr, order); 2008 free_pages((unsigned long)vaddr, order);
2000 return NULL; 2009 return NULL;
2001} 2010}
2002 2011
2003static void intel_free_coherent(struct device *hwdev, size_t size, 2012void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
2004 void *vaddr, dma_addr_t dma_handle) 2013 dma_addr_t dma_handle)
2005{ 2014{
2006 int order; 2015 int order;
2007 2016
2008 size = PAGE_ALIGN_4K(size); 2017 size = PAGE_ALIGN(size);
2009 order = get_order(size); 2018 order = get_order(size);
2010 2019
2011 intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL); 2020 intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL);
@@ -2013,8 +2022,9 @@ static void intel_free_coherent(struct device *hwdev, size_t size,
2013} 2022}
2014 2023
2015#define SG_ENT_VIRT_ADDRESS(sg) (sg_virt((sg))) 2024#define SG_ENT_VIRT_ADDRESS(sg) (sg_virt((sg)))
2016static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, 2025
2017 int nelems, int dir) 2026void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
2027 int nelems, int dir)
2018{ 2028{
2019 int i; 2029 int i;
2020 struct pci_dev *pdev = to_pci_dev(hwdev); 2030 struct pci_dev *pdev = to_pci_dev(hwdev);
@@ -2038,7 +2048,7 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
2038 size += aligned_size((u64)addr, sg->length); 2048 size += aligned_size((u64)addr, sg->length);
2039 } 2049 }
2040 2050
2041 start_addr = iova->pfn_lo << PAGE_SHIFT_4K; 2051 start_addr = iova->pfn_lo << PAGE_SHIFT;
2042 2052
2043 /* clear the whole page */ 2053 /* clear the whole page */
2044 dma_pte_clear_range(domain, start_addr, start_addr + size); 2054 dma_pte_clear_range(domain, start_addr, start_addr + size);
@@ -2046,7 +2056,7 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
2046 dma_pte_free_pagetable(domain, start_addr, start_addr + size); 2056 dma_pte_free_pagetable(domain, start_addr, start_addr + size);
2047 2057
2048 if (iommu_flush_iotlb_psi(domain->iommu, domain->id, start_addr, 2058 if (iommu_flush_iotlb_psi(domain->iommu, domain->id, start_addr,
2049 size >> PAGE_SHIFT_4K, 0)) 2059 size >> VTD_PAGE_SHIFT, 0))
2050 iommu_flush_write_buffer(domain->iommu); 2060 iommu_flush_write_buffer(domain->iommu);
2051 2061
2052 /* free iova */ 2062 /* free iova */
@@ -2067,8 +2077,8 @@ static int intel_nontranslate_map_sg(struct device *hddev,
2067 return nelems; 2077 return nelems;
2068} 2078}
2069 2079
2070static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, 2080int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
2071 int nelems, int dir) 2081 int dir)
2072{ 2082{
2073 void *addr; 2083 void *addr;
2074 int i; 2084 int i;
@@ -2096,7 +2106,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist,
2096 size += aligned_size((u64)addr, sg->length); 2106 size += aligned_size((u64)addr, sg->length);
2097 } 2107 }
2098 2108
2099 iova = __intel_alloc_iova(hwdev, domain, size); 2109 iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
2100 if (!iova) { 2110 if (!iova) {
2101 sglist->dma_length = 0; 2111 sglist->dma_length = 0;
2102 return 0; 2112 return 0;
@@ -2112,14 +2122,14 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist,
2112 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) 2122 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
2113 prot |= DMA_PTE_WRITE; 2123 prot |= DMA_PTE_WRITE;
2114 2124
2115 start_addr = iova->pfn_lo << PAGE_SHIFT_4K; 2125 start_addr = iova->pfn_lo << PAGE_SHIFT;
2116 offset = 0; 2126 offset = 0;
2117 for_each_sg(sglist, sg, nelems, i) { 2127 for_each_sg(sglist, sg, nelems, i) {
2118 addr = SG_ENT_VIRT_ADDRESS(sg); 2128 addr = SG_ENT_VIRT_ADDRESS(sg);
2119 addr = (void *)virt_to_phys(addr); 2129 addr = (void *)virt_to_phys(addr);
2120 size = aligned_size((u64)addr, sg->length); 2130 size = aligned_size((u64)addr, sg->length);
2121 ret = domain_page_mapping(domain, start_addr + offset, 2131 ret = domain_page_mapping(domain, start_addr + offset,
2122 ((u64)addr) & PAGE_MASK_4K, 2132 ((u64)addr) & PAGE_MASK,
2123 size, prot); 2133 size, prot);
2124 if (ret) { 2134 if (ret) {
2125 /* clear the page */ 2135 /* clear the page */
@@ -2133,14 +2143,14 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist,
2133 return 0; 2143 return 0;
2134 } 2144 }
2135 sg->dma_address = start_addr + offset + 2145 sg->dma_address = start_addr + offset +
2136 ((u64)addr & (~PAGE_MASK_4K)); 2146 ((u64)addr & (~PAGE_MASK));
2137 sg->dma_length = sg->length; 2147 sg->dma_length = sg->length;
2138 offset += size; 2148 offset += size;
2139 } 2149 }
2140 2150
2141 /* it's a non-present to present mapping */ 2151 /* it's a non-present to present mapping */
2142 if (iommu_flush_iotlb_psi(domain->iommu, domain->id, 2152 if (iommu_flush_iotlb_psi(domain->iommu, domain->id,
2143 start_addr, offset >> PAGE_SHIFT_4K, 1)) 2153 start_addr, offset >> VTD_PAGE_SHIFT, 1))
2144 iommu_flush_write_buffer(domain->iommu); 2154 iommu_flush_write_buffer(domain->iommu);
2145 return nelems; 2155 return nelems;
2146} 2156}
@@ -2180,7 +2190,6 @@ static inline int iommu_devinfo_cache_init(void)
2180 sizeof(struct device_domain_info), 2190 sizeof(struct device_domain_info),
2181 0, 2191 0,
2182 SLAB_HWCACHE_ALIGN, 2192 SLAB_HWCACHE_ALIGN,
2183
2184 NULL); 2193 NULL);
2185 if (!iommu_devinfo_cache) { 2194 if (!iommu_devinfo_cache) {
2186 printk(KERN_ERR "Couldn't create devinfo cache\n"); 2195 printk(KERN_ERR "Couldn't create devinfo cache\n");
@@ -2198,7 +2207,6 @@ static inline int iommu_iova_cache_init(void)
2198 sizeof(struct iova), 2207 sizeof(struct iova),
2199 0, 2208 0,
2200 SLAB_HWCACHE_ALIGN, 2209 SLAB_HWCACHE_ALIGN,
2201
2202 NULL); 2210 NULL);
2203 if (!iommu_iova_cache) { 2211 if (!iommu_iova_cache) {
2204 printk(KERN_ERR "Couldn't create iova cache\n"); 2212 printk(KERN_ERR "Couldn't create iova cache\n");
@@ -2327,7 +2335,7 @@ void intel_iommu_domain_exit(struct dmar_domain *domain)
2327 return; 2335 return;
2328 2336
2329 end = DOMAIN_MAX_ADDR(domain->gaw); 2337 end = DOMAIN_MAX_ADDR(domain->gaw);
2330 end = end & (~PAGE_MASK_4K); 2338 end = end & (~VTD_PAGE_MASK);
2331 2339
2332 /* clear ptes */ 2340 /* clear ptes */
2333 dma_pte_clear_range(domain, 0, end); 2341 dma_pte_clear_range(domain, 0, end);
@@ -2423,6 +2431,6 @@ u64 intel_iommu_iova_to_pfn(struct dmar_domain *domain, u64 iova)
2423 if (pte) 2431 if (pte)
2424 pfn = dma_pte_addr(*pte); 2432 pfn = dma_pte_addr(*pte);
2425 2433
2426 return pfn >> PAGE_SHIFT_4K; 2434 return pfn >> VTD_PAGE_SHIFT;
2427} 2435}
2428EXPORT_SYMBOL_GPL(intel_iommu_iova_to_pfn); 2436EXPORT_SYMBOL_GPL(intel_iommu_iova_to_pfn);
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index d2812013fd22..74801f7df9c9 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -759,3 +759,24 @@ void pci_msi_init_pci_dev(struct pci_dev *dev)
759{ 759{
760 INIT_LIST_HEAD(&dev->msi_list); 760 INIT_LIST_HEAD(&dev->msi_list);
761} 761}
762
763#ifdef CONFIG_ACPI
764#include <linux/acpi.h>
765#include <linux/pci-acpi.h>
766static void __devinit msi_acpi_init(void)
767{
768 if (acpi_pci_disabled)
769 return;
770 pci_osc_support_set(OSC_MSI_SUPPORT);
771 pcie_osc_support_set(OSC_MSI_SUPPORT);
772}
773#else
774static inline void msi_acpi_init(void) { }
775#endif /* CONFIG_ACPI */
776
777void __devinit msi_init(void)
778{
779 if (!pci_msi_enable)
780 return;
781 msi_acpi_init();
782}
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 89a2f0fa10f9..dfe7c8e1b185 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -24,17 +24,17 @@ struct acpi_osc_data {
24 acpi_handle handle; 24 acpi_handle handle;
25 u32 support_set; 25 u32 support_set;
26 u32 control_set; 26 u32 control_set;
27 int is_queried;
28 u32 query_result;
29 struct list_head sibiling; 27 struct list_head sibiling;
30}; 28};
31static LIST_HEAD(acpi_osc_data_list); 29static LIST_HEAD(acpi_osc_data_list);
32 30
33struct acpi_osc_args { 31struct acpi_osc_args {
34 u32 capbuf[3]; 32 u32 capbuf[3];
35 u32 query_result; 33 u32 ctrl_result;
36}; 34};
37 35
36static DEFINE_MUTEX(pci_acpi_lock);
37
38static struct acpi_osc_data *acpi_get_osc_data(acpi_handle handle) 38static struct acpi_osc_data *acpi_get_osc_data(acpi_handle handle)
39{ 39{
40 struct acpi_osc_data *data; 40 struct acpi_osc_data *data;
@@ -108,9 +108,8 @@ static acpi_status acpi_run_osc(acpi_handle handle,
108 goto out_kfree; 108 goto out_kfree;
109 } 109 }
110out_success: 110out_success:
111 if (flags & OSC_QUERY_ENABLE) 111 osc_args->ctrl_result =
112 osc_args->query_result = 112 *((u32 *)(out_obj->buffer.pointer + 8));
113 *((u32 *)(out_obj->buffer.pointer + 8));
114 status = AE_OK; 113 status = AE_OK;
115 114
116out_kfree: 115out_kfree:
@@ -118,41 +117,53 @@ out_kfree:
118 return status; 117 return status;
119} 118}
120 119
121static acpi_status acpi_query_osc(acpi_handle handle, 120static acpi_status __acpi_query_osc(u32 flags, struct acpi_osc_data *osc_data,
122 u32 level, void *context, void **retval) 121 u32 *result)
123{ 122{
124 acpi_status status; 123 acpi_status status;
125 struct acpi_osc_data *osc_data; 124 u32 support_set;
126 u32 flags = (unsigned long)context, support_set;
127 acpi_handle tmp;
128 struct acpi_osc_args osc_args; 125 struct acpi_osc_args osc_args;
129 126
130 status = acpi_get_handle(handle, "_OSC", &tmp);
131 if (ACPI_FAILURE(status))
132 return status;
133
134 osc_data = acpi_get_osc_data(handle);
135 if (!osc_data) {
136 printk(KERN_ERR "acpi osc data array is full\n");
137 return AE_ERROR;
138 }
139
140 /* do _OSC query for all possible controls */ 127 /* do _OSC query for all possible controls */
141 support_set = osc_data->support_set | (flags & OSC_SUPPORT_MASKS); 128 support_set = osc_data->support_set | (flags & OSC_SUPPORT_MASKS);
142 osc_args.capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE; 129 osc_args.capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
143 osc_args.capbuf[OSC_SUPPORT_TYPE] = support_set; 130 osc_args.capbuf[OSC_SUPPORT_TYPE] = support_set;
144 osc_args.capbuf[OSC_CONTROL_TYPE] = OSC_CONTROL_MASKS; 131 osc_args.capbuf[OSC_CONTROL_TYPE] = OSC_CONTROL_MASKS;
145 132
146 status = acpi_run_osc(handle, &osc_args); 133 status = acpi_run_osc(osc_data->handle, &osc_args);
147 if (ACPI_SUCCESS(status)) { 134 if (ACPI_SUCCESS(status)) {
148 osc_data->support_set = support_set; 135 osc_data->support_set = support_set;
149 osc_data->query_result = osc_args.query_result; 136 *result = osc_args.ctrl_result;
150 osc_data->is_queried = 1;
151 } 137 }
152 138
153 return status; 139 return status;
154} 140}
155 141
142static acpi_status acpi_query_osc(acpi_handle handle,
143 u32 level, void *context, void **retval)
144{
145 acpi_status status;
146 struct acpi_osc_data *osc_data;
147 u32 flags = (unsigned long)context, dummy;
148 acpi_handle tmp;
149
150 status = acpi_get_handle(handle, "_OSC", &tmp);
151 if (ACPI_FAILURE(status))
152 return AE_OK;
153
154 mutex_lock(&pci_acpi_lock);
155 osc_data = acpi_get_osc_data(handle);
156 if (!osc_data) {
157 printk(KERN_ERR "acpi osc data array is full\n");
158 goto out;
159 }
160
161 __acpi_query_osc(flags, osc_data, &dummy);
162out:
163 mutex_unlock(&pci_acpi_lock);
164 return AE_OK;
165}
166
156/** 167/**
157 * __pci_osc_support_set - register OS support to Firmware 168 * __pci_osc_support_set - register OS support to Firmware
158 * @flags: OS support bits 169 * @flags: OS support bits
@@ -181,7 +192,7 @@ acpi_status __pci_osc_support_set(u32 flags, const char *hid)
181acpi_status pci_osc_control_set(acpi_handle handle, u32 flags) 192acpi_status pci_osc_control_set(acpi_handle handle, u32 flags)
182{ 193{
183 acpi_status status; 194 acpi_status status;
184 u32 ctrlset, control_set; 195 u32 ctrlset, control_set, result;
185 acpi_handle tmp; 196 acpi_handle tmp;
186 struct acpi_osc_data *osc_data; 197 struct acpi_osc_data *osc_data;
187 struct acpi_osc_args osc_args; 198 struct acpi_osc_args osc_args;
@@ -190,19 +201,28 @@ acpi_status pci_osc_control_set(acpi_handle handle, u32 flags)
190 if (ACPI_FAILURE(status)) 201 if (ACPI_FAILURE(status))
191 return status; 202 return status;
192 203
204 mutex_lock(&pci_acpi_lock);
193 osc_data = acpi_get_osc_data(handle); 205 osc_data = acpi_get_osc_data(handle);
194 if (!osc_data) { 206 if (!osc_data) {
195 printk(KERN_ERR "acpi osc data array is full\n"); 207 printk(KERN_ERR "acpi osc data array is full\n");
196 return AE_ERROR; 208 status = AE_ERROR;
209 goto out;
197 } 210 }
198 211
199 ctrlset = (flags & OSC_CONTROL_MASKS); 212 ctrlset = (flags & OSC_CONTROL_MASKS);
200 if (!ctrlset) 213 if (!ctrlset) {
201 return AE_TYPE; 214 status = AE_TYPE;
215 goto out;
216 }
202 217
203 if (osc_data->is_queried && 218 status = __acpi_query_osc(osc_data->support_set, osc_data, &result);
204 ((osc_data->query_result & ctrlset) != ctrlset)) 219 if (ACPI_FAILURE(status))
205 return AE_SUPPORT; 220 goto out;
221
222 if ((result & ctrlset) != ctrlset) {
223 status = AE_SUPPORT;
224 goto out;
225 }
206 226
207 control_set = osc_data->control_set | ctrlset; 227 control_set = osc_data->control_set | ctrlset;
208 osc_args.capbuf[OSC_QUERY_TYPE] = 0; 228 osc_args.capbuf[OSC_QUERY_TYPE] = 0;
@@ -211,7 +231,8 @@ acpi_status pci_osc_control_set(acpi_handle handle, u32 flags)
211 status = acpi_run_osc(handle, &osc_args); 231 status = acpi_run_osc(handle, &osc_args);
212 if (ACPI_SUCCESS(status)) 232 if (ACPI_SUCCESS(status))
213 osc_data->control_set = control_set; 233 osc_data->control_set = control_set;
214 234out:
235 mutex_unlock(&pci_acpi_lock);
215 return status; 236 return status;
216} 237}
217EXPORT_SYMBOL(pci_osc_control_set); 238EXPORT_SYMBOL(pci_osc_control_set);
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 4db261e13e69..533aeb5fcbe4 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -18,6 +18,7 @@
18#include <linux/log2.h> 18#include <linux/log2.h>
19#include <linux/pci-aspm.h> 19#include <linux/pci-aspm.h>
20#include <linux/pm_wakeup.h> 20#include <linux/pm_wakeup.h>
21#include <linux/interrupt.h>
21#include <asm/dma.h> /* isa_dma_bridge_buggy */ 22#include <asm/dma.h> /* isa_dma_bridge_buggy */
22#include "pci.h" 23#include "pci.h"
23 24
@@ -1746,6 +1747,103 @@ EXPORT_SYMBOL(pci_set_dma_seg_boundary);
1746#endif 1747#endif
1747 1748
1748/** 1749/**
1750 * pci_execute_reset_function() - Reset a PCI device function
1751 * @dev: Device function to reset
1752 *
1753 * Some devices allow an individual function to be reset without affecting
1754 * other functions in the same device. The PCI device must be responsive
1755 * to PCI config space in order to use this function.
1756 *
1757 * The device function is presumed to be unused when this function is called.
1758 * Resetting the device will make the contents of PCI configuration space
1759 * random, so any caller of this must be prepared to reinitialise the
1760 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
1761 * etc.
1762 *
1763 * Returns 0 if the device function was successfully reset or -ENOTTY if the
1764 * device doesn't support resetting a single function.
1765 */
1766int pci_execute_reset_function(struct pci_dev *dev)
1767{
1768 u16 status;
1769 u32 cap;
1770 int exppos = pci_find_capability(dev, PCI_CAP_ID_EXP);
1771
1772 if (!exppos)
1773 return -ENOTTY;
1774 pci_read_config_dword(dev, exppos + PCI_EXP_DEVCAP, &cap);
1775 if (!(cap & PCI_EXP_DEVCAP_FLR))
1776 return -ENOTTY;
1777
1778 pci_block_user_cfg_access(dev);
1779
1780 /* Wait for Transaction Pending bit clean */
1781 msleep(100);
1782 pci_read_config_word(dev, exppos + PCI_EXP_DEVSTA, &status);
1783 if (status & PCI_EXP_DEVSTA_TRPND) {
1784 dev_info(&dev->dev, "Busy after 100ms while trying to reset; "
1785 "sleeping for 1 second\n");
1786 ssleep(1);
1787 pci_read_config_word(dev, exppos + PCI_EXP_DEVSTA, &status);
1788 if (status & PCI_EXP_DEVSTA_TRPND)
1789 dev_info(&dev->dev, "Still busy after 1s; "
1790 "proceeding with reset anyway\n");
1791 }
1792
1793 pci_write_config_word(dev, exppos + PCI_EXP_DEVCTL,
1794 PCI_EXP_DEVCTL_BCR_FLR);
1795 mdelay(100);
1796
1797 pci_unblock_user_cfg_access(dev);
1798 return 0;
1799}
1800EXPORT_SYMBOL_GPL(pci_execute_reset_function);
1801
1802/**
1803 * pci_reset_function() - quiesce and reset a PCI device function
1804 * @dev: Device function to reset
1805 *
1806 * Some devices allow an individual function to be reset without affecting
1807 * other functions in the same device. The PCI device must be responsive
1808 * to PCI config space in order to use this function.
1809 *
1810 * This function does not just reset the PCI portion of a device, but
1811 * clears all the state associated with the device. This function differs
1812 * from pci_execute_reset_function in that it saves and restores device state
1813 * over the reset.
1814 *
1815 * Returns 0 if the device function was successfully reset or -ENOTTY if the
1816 * device doesn't support resetting a single function.
1817 */
1818int pci_reset_function(struct pci_dev *dev)
1819{
1820 u32 cap;
1821 int exppos = pci_find_capability(dev, PCI_CAP_ID_EXP);
1822 int r;
1823
1824 if (!exppos)
1825 return -ENOTTY;
1826 pci_read_config_dword(dev, exppos + PCI_EXP_DEVCAP, &cap);
1827 if (!(cap & PCI_EXP_DEVCAP_FLR))
1828 return -ENOTTY;
1829
1830 if (!dev->msi_enabled && !dev->msix_enabled)
1831 disable_irq(dev->irq);
1832 pci_save_state(dev);
1833
1834 pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
1835
1836 r = pci_execute_reset_function(dev);
1837
1838 pci_restore_state(dev);
1839 if (!dev->msi_enabled && !dev->msix_enabled)
1840 enable_irq(dev->irq);
1841
1842 return r;
1843}
1844EXPORT_SYMBOL_GPL(pci_reset_function);
1845
1846/**
1749 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count 1847 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
1750 * @dev: PCI device to query 1848 * @dev: PCI device to query
1751 * 1849 *
@@ -1933,6 +2031,9 @@ static int __devinit pci_init(void)
1933 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { 2031 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
1934 pci_fixup_device(pci_fixup_final, dev); 2032 pci_fixup_device(pci_fixup_final, dev);
1935 } 2033 }
2034
2035 msi_init();
2036
1936 return 0; 2037 return 0;
1937} 2038}
1938 2039
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index b205ab866a1d..9de87e9f98f5 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -98,9 +98,11 @@ extern unsigned int pci_pm_d3_delay;
98#ifdef CONFIG_PCI_MSI 98#ifdef CONFIG_PCI_MSI
99void pci_no_msi(void); 99void pci_no_msi(void);
100extern void pci_msi_init_pci_dev(struct pci_dev *dev); 100extern void pci_msi_init_pci_dev(struct pci_dev *dev);
101extern void __devinit msi_init(void);
101#else 102#else
102static inline void pci_no_msi(void) { } 103static inline void pci_no_msi(void) { }
103static inline void pci_msi_init_pci_dev(struct pci_dev *dev) { } 104static inline void pci_msi_init_pci_dev(struct pci_dev *dev) { }
105static inline void msi_init(void) { }
104#endif 106#endif
105 107
106#ifdef CONFIG_PCIEAER 108#ifdef CONFIG_PCIEAER
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index aaaf0a1fed22..6f1e51d77bce 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -480,19 +480,27 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
480 int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS); 480 int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
481 u32 buses, i, j = 0; 481 u32 buses, i, j = 0;
482 u16 bctl; 482 u16 bctl;
483 int broken = 0;
483 484
484 pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses); 485 pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
485 486
486 dev_dbg(&dev->dev, "scanning behind bridge, config %06x, pass %d\n", 487 dev_dbg(&dev->dev, "scanning behind bridge, config %06x, pass %d\n",
487 buses & 0xffffff, pass); 488 buses & 0xffffff, pass);
488 489
490 /* Check if setup is sensible at all */
491 if (!pass &&
492 ((buses & 0xff) != bus->number || ((buses >> 8) & 0xff) <= bus->number)) {
493 dev_dbg(&dev->dev, "bus configuration invalid, reconfiguring\n");
494 broken = 1;
495 }
496
489 /* Disable MasterAbortMode during probing to avoid reporting 497 /* Disable MasterAbortMode during probing to avoid reporting
490 of bus errors (in some architectures) */ 498 of bus errors (in some architectures) */
491 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl); 499 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
492 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, 500 pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
493 bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT); 501 bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);
494 502
495 if ((buses & 0xffff00) && !pcibios_assign_all_busses() && !is_cardbus) { 503 if ((buses & 0xffff00) && !pcibios_assign_all_busses() && !is_cardbus && !broken) {
496 unsigned int cmax, busnr; 504 unsigned int cmax, busnr;
497 /* 505 /*
498 * Bus already configured by firmware, process it in the first 506 * Bus already configured by firmware, process it in the first
@@ -530,7 +538,7 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
530 * do in the second pass. 538 * do in the second pass.
531 */ 539 */
532 if (!pass) { 540 if (!pass) {
533 if (pcibios_assign_all_busses()) 541 if (pcibios_assign_all_busses() || broken)
534 /* Temporarily disable forwarding of the 542 /* Temporarily disable forwarding of the
535 configuration cycles on all bridges in 543 configuration cycles on all bridges in
536 this bus segment to avoid possible 544 this bus segment to avoid possible
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index bbf66ea8fd87..96cf8ecd04ce 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -43,6 +43,20 @@ static void __devinit quirk_mellanox_tavor(struct pci_dev *dev)
43DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR,quirk_mellanox_tavor); 43DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR,quirk_mellanox_tavor);
44DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE,quirk_mellanox_tavor); 44DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE,quirk_mellanox_tavor);
45 45
46/* Many VIA bridges seem to corrupt data for DAC. Disable it here */
47int forbid_dac __read_mostly;
48EXPORT_SYMBOL(forbid_dac);
49
50static __devinit void via_no_dac(struct pci_dev *dev)
51{
52 if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
53 dev_info(&dev->dev,
54 "VIA PCI bridge detected. Disabling DAC.\n");
55 forbid_dac = 1;
56 }
57}
58DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
59
46/* Deal with broken BIOS'es that neglect to enable passive release, 60/* Deal with broken BIOS'es that neglect to enable passive release,
47 which can cause problems in combination with the 82441FX/PPro MTRRs */ 61 which can cause problems in combination with the 82441FX/PPro MTRRs */
48static void quirk_passive_release(struct pci_dev *dev) 62static void quirk_passive_release(struct pci_dev *dev)
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index 4edfc4731bd4..5af8bd538149 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -166,6 +166,7 @@ struct pci_dev *pci_find_device(unsigned int vendor, unsigned int device,
166{ 166{
167 struct pci_dev *pdev; 167 struct pci_dev *pdev;
168 168
169 pci_dev_get(from);
169 pdev = pci_get_subsys(vendor, device, PCI_ANY_ID, PCI_ANY_ID, from); 170 pdev = pci_get_subsys(vendor, device, PCI_ANY_ID, PCI_ANY_ID, from);
170 pci_dev_put(pdev); 171 pci_dev_put(pdev);
171 return pdev; 172 return pdev;
@@ -270,12 +271,8 @@ static struct pci_dev *pci_get_dev_by_id(const struct pci_device_id *id,
270 struct pci_dev *pdev = NULL; 271 struct pci_dev *pdev = NULL;
271 272
272 WARN_ON(in_interrupt()); 273 WARN_ON(in_interrupt());
273 if (from) { 274 if (from)
274 /* FIXME 275 dev_start = &from->dev;
275 * take the cast off, when bus_find_device is made const.
276 */
277 dev_start = (struct device *)&from->dev;
278 }
279 dev = bus_find_device(&pci_bus_type, dev_start, (void *)id, 276 dev = bus_find_device(&pci_bus_type, dev_start, (void *)id,
280 match_pci_dev_by_id); 277 match_pci_dev_by_id);
281 if (dev) 278 if (dev)
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index 0c6db03698ea..4dd1c3e157ae 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -78,18 +78,100 @@ static struct kobj_type pci_slot_ktype = {
78 .default_attrs = pci_slot_default_attrs, 78 .default_attrs = pci_slot_default_attrs,
79}; 79};
80 80
81static char *make_slot_name(const char *name)
82{
83 char *new_name;
84 int len, max, dup;
85
86 new_name = kstrdup(name, GFP_KERNEL);
87 if (!new_name)
88 return NULL;
89
90 /*
91 * Make sure we hit the realloc case the first time through the
92 * loop. 'len' will be strlen(name) + 3 at that point which is
93 * enough space for "name-X" and the trailing NUL.
94 */
95 len = strlen(name) + 2;
96 max = 1;
97 dup = 1;
98
99 for (;;) {
100 struct kobject *dup_slot;
101 dup_slot = kset_find_obj(pci_slots_kset, new_name);
102 if (!dup_slot)
103 break;
104 kobject_put(dup_slot);
105 if (dup == max) {
106 len++;
107 max *= 10;
108 kfree(new_name);
109 new_name = kmalloc(len, GFP_KERNEL);
110 if (!new_name)
111 break;
112 }
113 sprintf(new_name, "%s-%d", name, dup++);
114 }
115
116 return new_name;
117}
118
119static int rename_slot(struct pci_slot *slot, const char *name)
120{
121 int result = 0;
122 char *slot_name;
123
124 if (strcmp(pci_slot_name(slot), name) == 0)
125 return result;
126
127 slot_name = make_slot_name(name);
128 if (!slot_name)
129 return -ENOMEM;
130
131 result = kobject_rename(&slot->kobj, slot_name);
132 kfree(slot_name);
133
134 return result;
135}
136
137static struct pci_slot *get_slot(struct pci_bus *parent, int slot_nr)
138{
139 struct pci_slot *slot;
140 /*
141 * We already hold pci_bus_sem so don't worry
142 */
143 list_for_each_entry(slot, &parent->slots, list)
144 if (slot->number == slot_nr) {
145 kobject_get(&slot->kobj);
146 return slot;
147 }
148
149 return NULL;
150}
151
81/** 152/**
82 * pci_create_slot - create or increment refcount for physical PCI slot 153 * pci_create_slot - create or increment refcount for physical PCI slot
83 * @parent: struct pci_bus of parent bridge 154 * @parent: struct pci_bus of parent bridge
84 * @slot_nr: PCI_SLOT(pci_dev->devfn) or -1 for placeholder 155 * @slot_nr: PCI_SLOT(pci_dev->devfn) or -1 for placeholder
85 * @name: user visible string presented in /sys/bus/pci/slots/<name> 156 * @name: user visible string presented in /sys/bus/pci/slots/<name>
157 * @hotplug: set if caller is hotplug driver, NULL otherwise
86 * 158 *
87 * PCI slots have first class attributes such as address, speed, width, 159 * PCI slots have first class attributes such as address, speed, width,
88 * and a &struct pci_slot is used to manage them. This interface will 160 * and a &struct pci_slot is used to manage them. This interface will
89 * either return a new &struct pci_slot to the caller, or if the pci_slot 161 * either return a new &struct pci_slot to the caller, or if the pci_slot
90 * already exists, its refcount will be incremented. 162 * already exists, its refcount will be incremented.
91 * 163 *
92 * Slots are uniquely identified by a @pci_bus, @slot_nr, @name tuple. 164 * Slots are uniquely identified by a @pci_bus, @slot_nr tuple.
165 *
166 * There are known platforms with broken firmware that assign the same
167 * name to multiple slots. Workaround these broken platforms by renaming
168 * the slots on behalf of the caller. If firmware assigns name N to
169 * multiple slots:
170 *
171 * The first slot is assigned N
172 * The second slot is assigned N-1
173 * The third slot is assigned N-2
174 * etc.
93 * 175 *
94 * Placeholder slots: 176 * Placeholder slots:
95 * In most cases, @pci_bus, @slot_nr will be sufficient to uniquely identify 177 * In most cases, @pci_bus, @slot_nr will be sufficient to uniquely identify
@@ -98,61 +180,67 @@ static struct kobj_type pci_slot_ktype = {
98 * the slot. In this scenario, the caller may pass -1 for @slot_nr. 180 * the slot. In this scenario, the caller may pass -1 for @slot_nr.
99 * 181 *
100 * The following semantics are imposed when the caller passes @slot_nr == 182 * The following semantics are imposed when the caller passes @slot_nr ==
101 * -1. First, the check for existing %struct pci_slot is skipped, as the 183 * -1. First, we no longer check for an existing %struct pci_slot, as there
102 * caller may know about several unpopulated slots on a given %struct 184 * may be many slots with @slot_nr of -1. The other change in semantics is
103 * pci_bus, and each slot would have a @slot_nr of -1. Uniqueness for
104 * these slots is then determined by the @name parameter. We expect
105 * kobject_init_and_add() to warn us if the caller attempts to create
106 * multiple slots with the same name. The other change in semantics is
107 * user-visible, which is the 'address' parameter presented in sysfs will 185 * user-visible, which is the 'address' parameter presented in sysfs will
108 * consist solely of a dddd:bb tuple, where dddd is the PCI domain of the 186 * consist solely of a dddd:bb tuple, where dddd is the PCI domain of the
109 * %struct pci_bus and bb is the bus number. In other words, the devfn of 187 * %struct pci_bus and bb is the bus number. In other words, the devfn of
110 * the 'placeholder' slot will not be displayed. 188 * the 'placeholder' slot will not be displayed.
111 */ 189 */
112
113struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr, 190struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
114 const char *name) 191 const char *name,
192 struct hotplug_slot *hotplug)
115{ 193{
116 struct pci_dev *dev; 194 struct pci_dev *dev;
117 struct pci_slot *slot; 195 struct pci_slot *slot;
118 int err; 196 int err = 0;
197 char *slot_name = NULL;
119 198
120 down_write(&pci_bus_sem); 199 down_write(&pci_bus_sem);
121 200
122 if (slot_nr == -1) 201 if (slot_nr == -1)
123 goto placeholder; 202 goto placeholder;
124 203
125 /* If we've already created this slot, bump refcount and return. */ 204 /*
126 list_for_each_entry(slot, &parent->slots, list) { 205 * Hotplug drivers are allowed to rename an existing slot,
127 if (slot->number == slot_nr) { 206 * but only if not already claimed.
128 kobject_get(&slot->kobj); 207 */
129 pr_debug("%s: inc refcount to %d on %04x:%02x:%02x\n", 208 slot = get_slot(parent, slot_nr);
130 __func__, 209 if (slot) {
131 atomic_read(&slot->kobj.kref.refcount), 210 if (hotplug) {
132 pci_domain_nr(parent), parent->number, 211 if ((err = slot->hotplug ? -EBUSY : 0)
133 slot_nr); 212 || (err = rename_slot(slot, name))) {
134 goto out; 213 kobject_put(&slot->kobj);
214 slot = NULL;
215 goto err;
216 }
135 } 217 }
218 goto out;
136 } 219 }
137 220
138placeholder: 221placeholder:
139 slot = kzalloc(sizeof(*slot), GFP_KERNEL); 222 slot = kzalloc(sizeof(*slot), GFP_KERNEL);
140 if (!slot) { 223 if (!slot) {
141 slot = ERR_PTR(-ENOMEM); 224 err = -ENOMEM;
142 goto out; 225 goto err;
143 } 226 }
144 227
145 slot->bus = parent; 228 slot->bus = parent;
146 slot->number = slot_nr; 229 slot->number = slot_nr;
147 230
148 slot->kobj.kset = pci_slots_kset; 231 slot->kobj.kset = pci_slots_kset;
149 err = kobject_init_and_add(&slot->kobj, &pci_slot_ktype, NULL, 232
150 "%s", name); 233 slot_name = make_slot_name(name);
151 if (err) { 234 if (!slot_name) {
152 printk(KERN_ERR "Unable to register kobject %s\n", name); 235 err = -ENOMEM;
153 goto err; 236 goto err;
154 } 237 }
155 238
239 err = kobject_init_and_add(&slot->kobj, &pci_slot_ktype, NULL,
240 "%s", slot_name);
241 if (err)
242 goto err;
243
156 INIT_LIST_HEAD(&slot->list); 244 INIT_LIST_HEAD(&slot->list);
157 list_add(&slot->list, &parent->slots); 245 list_add(&slot->list, &parent->slots);
158 246
@@ -164,10 +252,10 @@ placeholder:
164 pr_debug("%s: created pci_slot on %04x:%02x:%02x\n", 252 pr_debug("%s: created pci_slot on %04x:%02x:%02x\n",
165 __func__, pci_domain_nr(parent), parent->number, slot_nr); 253 __func__, pci_domain_nr(parent), parent->number, slot_nr);
166 254
167 out: 255out:
168 up_write(&pci_bus_sem); 256 up_write(&pci_bus_sem);
169 return slot; 257 return slot;
170 err: 258err:
171 kfree(slot); 259 kfree(slot);
172 slot = ERR_PTR(err); 260 slot = ERR_PTR(err);
173 goto out; 261 goto out;
@@ -175,7 +263,7 @@ placeholder:
175EXPORT_SYMBOL_GPL(pci_create_slot); 263EXPORT_SYMBOL_GPL(pci_create_slot);
176 264
177/** 265/**
178 * pci_update_slot_number - update %struct pci_slot -> number 266 * pci_renumber_slot - update %struct pci_slot -> number
179 * @slot - %struct pci_slot to update 267 * @slot - %struct pci_slot to update
180 * @slot_nr - new number for slot 268 * @slot_nr - new number for slot
181 * 269 *
@@ -183,27 +271,22 @@ EXPORT_SYMBOL_GPL(pci_create_slot);
183 * created a placeholder slot in pci_create_slot() by passing a -1 as 271 * created a placeholder slot in pci_create_slot() by passing a -1 as
184 * slot_nr, to update their %struct pci_slot with the correct @slot_nr. 272 * slot_nr, to update their %struct pci_slot with the correct @slot_nr.
185 */ 273 */
186 274void pci_renumber_slot(struct pci_slot *slot, int slot_nr)
187void pci_update_slot_number(struct pci_slot *slot, int slot_nr)
188{ 275{
189 int name_count = 0;
190 struct pci_slot *tmp; 276 struct pci_slot *tmp;
191 277
192 down_write(&pci_bus_sem); 278 down_write(&pci_bus_sem);
193 279
194 list_for_each_entry(tmp, &slot->bus->slots, list) { 280 list_for_each_entry(tmp, &slot->bus->slots, list) {
195 WARN_ON(tmp->number == slot_nr); 281 WARN_ON(tmp->number == slot_nr);
196 if (!strcmp(kobject_name(&tmp->kobj), kobject_name(&slot->kobj))) 282 goto out;
197 name_count++;
198 } 283 }
199 284
200 if (name_count > 1)
201 printk(KERN_WARNING "pci_update_slot_number found %d slots with the same name: %s\n", name_count, kobject_name(&slot->kobj));
202
203 slot->number = slot_nr; 285 slot->number = slot_nr;
286out:
204 up_write(&pci_bus_sem); 287 up_write(&pci_bus_sem);
205} 288}
206EXPORT_SYMBOL_GPL(pci_update_slot_number); 289EXPORT_SYMBOL_GPL(pci_renumber_slot);
207 290
208/** 291/**
209 * pci_destroy_slot - decrement refcount for physical PCI slot 292 * pci_destroy_slot - decrement refcount for physical PCI slot
@@ -213,7 +296,6 @@ EXPORT_SYMBOL_GPL(pci_update_slot_number);
213 * just call kobject_put on its kobj and let our release methods do the 296 * just call kobject_put on its kobj and let our release methods do the
214 * rest. 297 * rest.
215 */ 298 */
216
217void pci_destroy_slot(struct pci_slot *slot) 299void pci_destroy_slot(struct pci_slot *slot)
218{ 300{
219 pr_debug("%s: dec refcount to %d on %04x:%02x:%02x\n", __func__, 301 pr_debug("%s: dec refcount to %d on %04x:%02x:%02x\n", __func__,
diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile
index b46c60b72708..23e492bf75cf 100644
--- a/drivers/pcmcia/Makefile
+++ b/drivers/pcmcia/Makefile
@@ -70,7 +70,7 @@ pxa2xx-obj-$(CONFIG_MACH_MAINSTONE) += pxa2xx_mainstone.o
70pxa2xx-obj-$(CONFIG_PXA_SHARPSL) += pxa2xx_sharpsl.o 70pxa2xx-obj-$(CONFIG_PXA_SHARPSL) += pxa2xx_sharpsl.o
71pxa2xx-obj-$(CONFIG_MACH_ARMCORE) += pxa2xx_cm_x2xx_cs.o 71pxa2xx-obj-$(CONFIG_MACH_ARMCORE) += pxa2xx_cm_x2xx_cs.o
72pxa2xx-obj-$(CONFIG_ARCH_VIPER) += pxa2xx_viper.o 72pxa2xx-obj-$(CONFIG_ARCH_VIPER) += pxa2xx_viper.o
73pxa2xx-obj-$(CONFIG_TRIZEPS_PCMCIA) += pxa2xx_trizeps.o 73pxa2xx-obj-$(CONFIG_TRIZEPS_PCMCIA) += pxa2xx_trizeps4.o
74pxa2xx-obj-$(CONFIG_MACH_PALMTX) += pxa2xx_palmtx.o 74pxa2xx-obj-$(CONFIG_MACH_PALMTX) += pxa2xx_palmtx.o
75pxa2xx-obj-$(CONFIG_MACH_PALMLD) += pxa2xx_palmld.o 75pxa2xx-obj-$(CONFIG_MACH_PALMLD) += pxa2xx_palmld.o
76 76
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 671a7d634d8b..8abbb2020af9 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -246,6 +246,16 @@ config RTC_DRV_TWL92330
246 platforms. The support is integrated with the rest of 246 platforms. The support is integrated with the rest of
247 the Menelaus driver; it's not separate module. 247 the Menelaus driver; it's not separate module.
248 248
249config RTC_DRV_TWL4030
250 tristate "TI TWL4030/TWL5030/TPS659x0"
251 depends on RTC_CLASS && TWL4030_CORE
252 help
253 If you say yes here you get support for the RTC on the
254 TWL4030 family chips, used mostly with OMAP3 platforms.
255
256 This driver can also be built as a module. If so, the module
257 will be called rtc-twl4030.
258
249config RTC_DRV_S35390A 259config RTC_DRV_S35390A
250 tristate "Seiko Instruments S-35390A" 260 tristate "Seiko Instruments S-35390A"
251 select BITREVERSE 261 select BITREVERSE
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index d6a9ac7176ea..e9e8474cc8fe 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -63,6 +63,7 @@ obj-$(CONFIG_RTC_DRV_SA1100) += rtc-sa1100.o
63obj-$(CONFIG_RTC_DRV_SH) += rtc-sh.o 63obj-$(CONFIG_RTC_DRV_SH) += rtc-sh.o
64obj-$(CONFIG_RTC_DRV_STK17TA8) += rtc-stk17ta8.o 64obj-$(CONFIG_RTC_DRV_STK17TA8) += rtc-stk17ta8.o
65obj-$(CONFIG_RTC_DRV_TEST) += rtc-test.o 65obj-$(CONFIG_RTC_DRV_TEST) += rtc-test.o
66obj-$(CONFIG_RTC_DRV_TWL4030) += rtc-twl4030.o
66obj-$(CONFIG_RTC_DRV_V3020) += rtc-v3020.o 67obj-$(CONFIG_RTC_DRV_V3020) += rtc-v3020.o
67obj-$(CONFIG_RTC_DRV_VR41XX) += rtc-vr41xx.o 68obj-$(CONFIG_RTC_DRV_VR41XX) += rtc-vr41xx.o
68obj-$(CONFIG_RTC_DRV_X1205) += rtc-x1205.o 69obj-$(CONFIG_RTC_DRV_X1205) += rtc-x1205.o
diff --git a/drivers/rtc/rtc-twl4030.c b/drivers/rtc/rtc-twl4030.c
new file mode 100644
index 000000000000..abe87a4d2665
--- /dev/null
+++ b/drivers/rtc/rtc-twl4030.c
@@ -0,0 +1,564 @@
1/*
2 * rtc-twl4030.c -- TWL4030 Real Time Clock interface
3 *
4 * Copyright (C) 2007 MontaVista Software, Inc
5 * Author: Alexandre Rusev <source@mvista.com>
6 *
7 * Based on original TI driver twl4030-rtc.c
8 * Copyright (C) 2006 Texas Instruments, Inc.
9 *
10 * Based on rtc-omap.c
11 * Copyright (C) 2003 MontaVista Software, Inc.
12 * Author: George G. Davis <gdavis@mvista.com> or <source@mvista.com>
13 * Copyright (C) 2006 David Brownell
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation; either version
18 * 2 of the License, or (at your option) any later version.
19 */
20
21#include <linux/kernel.h>
22#include <linux/init.h>
23#include <linux/module.h>
24#include <linux/types.h>
25#include <linux/rtc.h>
26#include <linux/bcd.h>
27#include <linux/platform_device.h>
28#include <linux/interrupt.h>
29
30#include <linux/i2c/twl4030.h>
31
32
33/*
34 * RTC block register offsets (use TWL_MODULE_RTC)
35 */
36#define REG_SECONDS_REG 0x00
37#define REG_MINUTES_REG 0x01
38#define REG_HOURS_REG 0x02
39#define REG_DAYS_REG 0x03
40#define REG_MONTHS_REG 0x04
41#define REG_YEARS_REG 0x05
42#define REG_WEEKS_REG 0x06
43
44#define REG_ALARM_SECONDS_REG 0x07
45#define REG_ALARM_MINUTES_REG 0x08
46#define REG_ALARM_HOURS_REG 0x09
47#define REG_ALARM_DAYS_REG 0x0A
48#define REG_ALARM_MONTHS_REG 0x0B
49#define REG_ALARM_YEARS_REG 0x0C
50
51#define REG_RTC_CTRL_REG 0x0D
52#define REG_RTC_STATUS_REG 0x0E
53#define REG_RTC_INTERRUPTS_REG 0x0F
54
55#define REG_RTC_COMP_LSB_REG 0x10
56#define REG_RTC_COMP_MSB_REG 0x11
57
58/* RTC_CTRL_REG bitfields */
59#define BIT_RTC_CTRL_REG_STOP_RTC_M 0x01
60#define BIT_RTC_CTRL_REG_ROUND_30S_M 0x02
61#define BIT_RTC_CTRL_REG_AUTO_COMP_M 0x04
62#define BIT_RTC_CTRL_REG_MODE_12_24_M 0x08
63#define BIT_RTC_CTRL_REG_TEST_MODE_M 0x10
64#define BIT_RTC_CTRL_REG_SET_32_COUNTER_M 0x20
65#define BIT_RTC_CTRL_REG_GET_TIME_M 0x40
66
67/* RTC_STATUS_REG bitfields */
68#define BIT_RTC_STATUS_REG_RUN_M 0x02
69#define BIT_RTC_STATUS_REG_1S_EVENT_M 0x04
70#define BIT_RTC_STATUS_REG_1M_EVENT_M 0x08
71#define BIT_RTC_STATUS_REG_1H_EVENT_M 0x10
72#define BIT_RTC_STATUS_REG_1D_EVENT_M 0x20
73#define BIT_RTC_STATUS_REG_ALARM_M 0x40
74#define BIT_RTC_STATUS_REG_POWER_UP_M 0x80
75
76/* RTC_INTERRUPTS_REG bitfields */
77#define BIT_RTC_INTERRUPTS_REG_EVERY_M 0x03
78#define BIT_RTC_INTERRUPTS_REG_IT_TIMER_M 0x04
79#define BIT_RTC_INTERRUPTS_REG_IT_ALARM_M 0x08
80
81
82/* REG_SECONDS_REG through REG_YEARS_REG is how many registers? */
83#define ALL_TIME_REGS 6
84
85/*----------------------------------------------------------------------*/
86
87/*
88 * Supports 1 byte read from TWL4030 RTC register.
89 */
90static int twl4030_rtc_read_u8(u8 *data, u8 reg)
91{
92 int ret;
93
94 ret = twl4030_i2c_read_u8(TWL4030_MODULE_RTC, data, reg);
95 if (ret < 0)
96 pr_err("twl4030_rtc: Could not read TWL4030"
97 "register %X - error %d\n", reg, ret);
98 return ret;
99}
100
101/*
102 * Supports 1 byte write to TWL4030 RTC registers.
103 */
104static int twl4030_rtc_write_u8(u8 data, u8 reg)
105{
106 int ret;
107
108 ret = twl4030_i2c_write_u8(TWL4030_MODULE_RTC, data, reg);
109 if (ret < 0)
110 pr_err("twl4030_rtc: Could not write TWL4030"
111 "register %X - error %d\n", reg, ret);
112 return ret;
113}
114
115/*
116 * Cache the value for timer/alarm interrupts register; this is
117 * only changed by callers holding rtc ops lock (or resume).
118 */
119static unsigned char rtc_irq_bits;
120
121/*
122 * Enable timer and/or alarm interrupts.
123 */
124static int set_rtc_irq_bit(unsigned char bit)
125{
126 unsigned char val;
127 int ret;
128
129 val = rtc_irq_bits | bit;
130 ret = twl4030_rtc_write_u8(val, REG_RTC_INTERRUPTS_REG);
131 if (ret == 0)
132 rtc_irq_bits = val;
133
134 return ret;
135}
136
137/*
138 * Disable timer and/or alarm interrupts.
139 */
140static int mask_rtc_irq_bit(unsigned char bit)
141{
142 unsigned char val;
143 int ret;
144
145 val = rtc_irq_bits & ~bit;
146 ret = twl4030_rtc_write_u8(val, REG_RTC_INTERRUPTS_REG);
147 if (ret == 0)
148 rtc_irq_bits = val;
149
150 return ret;
151}
152
153static inline int twl4030_rtc_alarm_irq_set_state(int enabled)
154{
155 int ret;
156
157 if (enabled)
158 ret = set_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_ALARM_M);
159 else
160 ret = mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_ALARM_M);
161
162 return ret;
163}
164
165static inline int twl4030_rtc_irq_set_state(int enabled)
166{
167 int ret;
168
169 if (enabled)
170 ret = set_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M);
171 else
172 ret = mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M);
173
174 return ret;
175}
176
177/*
178 * Gets current TWL4030 RTC time and date parameters.
179 *
180 * The RTC's time/alarm representation is not what gmtime(3) requires
181 * Linux to use:
182 *
183 * - Months are 1..12 vs Linux 0-11
184 * - Years are 0..99 vs Linux 1900..N (we assume 21st century)
185 */
186static int twl4030_rtc_read_time(struct device *dev, struct rtc_time *tm)
187{
188 unsigned char rtc_data[ALL_TIME_REGS + 1];
189 int ret;
190 u8 save_control;
191
192 ret = twl4030_rtc_read_u8(&save_control, REG_RTC_CTRL_REG);
193 if (ret < 0)
194 return ret;
195
196 save_control |= BIT_RTC_CTRL_REG_GET_TIME_M;
197
198 ret = twl4030_rtc_write_u8(save_control, REG_RTC_CTRL_REG);
199 if (ret < 0)
200 return ret;
201
202 ret = twl4030_i2c_read(TWL4030_MODULE_RTC, rtc_data,
203 REG_SECONDS_REG, ALL_TIME_REGS);
204
205 if (ret < 0) {
206 dev_err(dev, "rtc_read_time error %d\n", ret);
207 return ret;
208 }
209
210 tm->tm_sec = bcd2bin(rtc_data[0]);
211 tm->tm_min = bcd2bin(rtc_data[1]);
212 tm->tm_hour = bcd2bin(rtc_data[2]);
213 tm->tm_mday = bcd2bin(rtc_data[3]);
214 tm->tm_mon = bcd2bin(rtc_data[4]) - 1;
215 tm->tm_year = bcd2bin(rtc_data[5]) + 100;
216
217 return ret;
218}
219
220static int twl4030_rtc_set_time(struct device *dev, struct rtc_time *tm)
221{
222 unsigned char save_control;
223 unsigned char rtc_data[ALL_TIME_REGS + 1];
224 int ret;
225
226 rtc_data[1] = bin2bcd(tm->tm_sec);
227 rtc_data[2] = bin2bcd(tm->tm_min);
228 rtc_data[3] = bin2bcd(tm->tm_hour);
229 rtc_data[4] = bin2bcd(tm->tm_mday);
230 rtc_data[5] = bin2bcd(tm->tm_mon + 1);
231 rtc_data[6] = bin2bcd(tm->tm_year - 100);
232
233 /* Stop RTC while updating the TC registers */
234 ret = twl4030_rtc_read_u8(&save_control, REG_RTC_CTRL_REG);
235 if (ret < 0)
236 goto out;
237
238 save_control &= ~BIT_RTC_CTRL_REG_STOP_RTC_M;
239 twl4030_rtc_write_u8(save_control, REG_RTC_CTRL_REG);
240 if (ret < 0)
241 goto out;
242
243 /* update all the time registers in one shot */
244 ret = twl4030_i2c_write(TWL4030_MODULE_RTC, rtc_data,
245 REG_SECONDS_REG, ALL_TIME_REGS);
246 if (ret < 0) {
247 dev_err(dev, "rtc_set_time error %d\n", ret);
248 goto out;
249 }
250
251 /* Start back RTC */
252 save_control |= BIT_RTC_CTRL_REG_STOP_RTC_M;
253 ret = twl4030_rtc_write_u8(save_control, REG_RTC_CTRL_REG);
254
255out:
256 return ret;
257}
258
259/*
260 * Gets current TWL4030 RTC alarm time.
261 */
262static int twl4030_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
263{
264 unsigned char rtc_data[ALL_TIME_REGS + 1];
265 int ret;
266
267 ret = twl4030_i2c_read(TWL4030_MODULE_RTC, rtc_data,
268 REG_ALARM_SECONDS_REG, ALL_TIME_REGS);
269 if (ret < 0) {
270 dev_err(dev, "rtc_read_alarm error %d\n", ret);
271 return ret;
272 }
273
274 /* some of these fields may be wildcard/"match all" */
275 alm->time.tm_sec = bcd2bin(rtc_data[0]);
276 alm->time.tm_min = bcd2bin(rtc_data[1]);
277 alm->time.tm_hour = bcd2bin(rtc_data[2]);
278 alm->time.tm_mday = bcd2bin(rtc_data[3]);
279 alm->time.tm_mon = bcd2bin(rtc_data[4]) - 1;
280 alm->time.tm_year = bcd2bin(rtc_data[5]) + 100;
281
282 /* report cached alarm enable state */
283 if (rtc_irq_bits & BIT_RTC_INTERRUPTS_REG_IT_ALARM_M)
284 alm->enabled = 1;
285
286 return ret;
287}
288
289static int twl4030_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
290{
291 unsigned char alarm_data[ALL_TIME_REGS + 1];
292 int ret;
293
294 ret = twl4030_rtc_alarm_irq_set_state(0);
295 if (ret)
296 goto out;
297
298 alarm_data[1] = bin2bcd(alm->time.tm_sec);
299 alarm_data[2] = bin2bcd(alm->time.tm_min);
300 alarm_data[3] = bin2bcd(alm->time.tm_hour);
301 alarm_data[4] = bin2bcd(alm->time.tm_mday);
302 alarm_data[5] = bin2bcd(alm->time.tm_mon + 1);
303 alarm_data[6] = bin2bcd(alm->time.tm_year - 100);
304
305 /* update all the alarm registers in one shot */
306 ret = twl4030_i2c_write(TWL4030_MODULE_RTC, alarm_data,
307 REG_ALARM_SECONDS_REG, ALL_TIME_REGS);
308 if (ret) {
309 dev_err(dev, "rtc_set_alarm error %d\n", ret);
310 goto out;
311 }
312
313 if (alm->enabled)
314 ret = twl4030_rtc_alarm_irq_set_state(1);
315out:
316 return ret;
317}
318
319#ifdef CONFIG_RTC_INTF_DEV
320
321static int twl4030_rtc_ioctl(struct device *dev, unsigned int cmd,
322 unsigned long arg)
323{
324 switch (cmd) {
325 case RTC_AIE_OFF:
326 return twl4030_rtc_alarm_irq_set_state(0);
327 case RTC_AIE_ON:
328 return twl4030_rtc_alarm_irq_set_state(1);
329 case RTC_UIE_OFF:
330 return twl4030_rtc_irq_set_state(0);
331 case RTC_UIE_ON:
332 return twl4030_rtc_irq_set_state(1);
333
334 default:
335 return -ENOIOCTLCMD;
336 }
337}
338
339#else
340#define omap_rtc_ioctl NULL
341#endif
342
343static irqreturn_t twl4030_rtc_interrupt(int irq, void *rtc)
344{
345 unsigned long events = 0;
346 int ret = IRQ_NONE;
347 int res;
348 u8 rd_reg;
349
350#ifdef CONFIG_LOCKDEP
351 /* WORKAROUND for lockdep forcing IRQF_DISABLED on us, which
352 * we don't want and can't tolerate. Although it might be
353 * friendlier not to borrow this thread context...
354 */
355 local_irq_enable();
356#endif
357
358 res = twl4030_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG);
359 if (res)
360 goto out;
361 /*
362 * Figure out source of interrupt: ALARM or TIMER in RTC_STATUS_REG.
363 * only one (ALARM or RTC) interrupt source may be enabled
364 * at time, we also could check our results
365 * by reading RTS_INTERRUPTS_REGISTER[IT_TIMER,IT_ALARM]
366 */
367 if (rd_reg & BIT_RTC_STATUS_REG_ALARM_M)
368 events |= RTC_IRQF | RTC_AF;
369 else
370 events |= RTC_IRQF | RTC_UF;
371
372 res = twl4030_rtc_write_u8(rd_reg | BIT_RTC_STATUS_REG_ALARM_M,
373 REG_RTC_STATUS_REG);
374 if (res)
375 goto out;
376
377 /* Clear on Read enabled. RTC_IT bit of TWL4030_INT_PWR_ISR1
378 * needs 2 reads to clear the interrupt. One read is done in
379 * do_twl4030_pwrirq(). Doing the second read, to clear
380 * the bit.
381 *
382 * FIXME the reason PWR_ISR1 needs an extra read is that
383 * RTC_IF retriggered until we cleared REG_ALARM_M above.
384 * But re-reading like this is a bad hack; by doing so we
385 * risk wrongly clearing status for some other IRQ (losing
386 * the interrupt). Be smarter about handling RTC_UF ...
387 */
388 res = twl4030_i2c_read_u8(TWL4030_MODULE_INT,
389 &rd_reg, TWL4030_INT_PWR_ISR1);
390 if (res)
391 goto out;
392
393 /* Notify RTC core on event */
394 rtc_update_irq(rtc, 1, events);
395
396 ret = IRQ_HANDLED;
397out:
398 return ret;
399}
400
401static struct rtc_class_ops twl4030_rtc_ops = {
402 .ioctl = twl4030_rtc_ioctl,
403 .read_time = twl4030_rtc_read_time,
404 .set_time = twl4030_rtc_set_time,
405 .read_alarm = twl4030_rtc_read_alarm,
406 .set_alarm = twl4030_rtc_set_alarm,
407};
408
409/*----------------------------------------------------------------------*/
410
411static int __devinit twl4030_rtc_probe(struct platform_device *pdev)
412{
413 struct rtc_device *rtc;
414 int ret = 0;
415 int irq = platform_get_irq(pdev, 0);
416 u8 rd_reg;
417
418 if (irq < 0)
419 return irq;
420
421 rtc = rtc_device_register(pdev->name,
422 &pdev->dev, &twl4030_rtc_ops, THIS_MODULE);
423 if (IS_ERR(rtc)) {
424 ret = -EINVAL;
425 dev_err(&pdev->dev, "can't register RTC device, err %ld\n",
426 PTR_ERR(rtc));
427 goto out0;
428
429 }
430
431 platform_set_drvdata(pdev, rtc);
432
433 ret = twl4030_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG);
434
435 if (ret < 0)
436 goto out1;
437
438 if (rd_reg & BIT_RTC_STATUS_REG_POWER_UP_M)
439 dev_warn(&pdev->dev, "Power up reset detected.\n");
440
441 if (rd_reg & BIT_RTC_STATUS_REG_ALARM_M)
442 dev_warn(&pdev->dev, "Pending Alarm interrupt detected.\n");
443
444 /* Clear RTC Power up reset and pending alarm interrupts */
445 ret = twl4030_rtc_write_u8(rd_reg, REG_RTC_STATUS_REG);
446 if (ret < 0)
447 goto out1;
448
449 ret = request_irq(irq, twl4030_rtc_interrupt,
450 IRQF_TRIGGER_RISING,
451 rtc->dev.bus_id, rtc);
452 if (ret < 0) {
453 dev_err(&pdev->dev, "IRQ is not free.\n");
454 goto out1;
455 }
456
457 /* Check RTC module status, Enable if it is off */
458 ret = twl4030_rtc_read_u8(&rd_reg, REG_RTC_CTRL_REG);
459 if (ret < 0)
460 goto out2;
461
462 if (!(rd_reg & BIT_RTC_CTRL_REG_STOP_RTC_M)) {
463 dev_info(&pdev->dev, "Enabling TWL4030-RTC.\n");
464 rd_reg = BIT_RTC_CTRL_REG_STOP_RTC_M;
465 ret = twl4030_rtc_write_u8(rd_reg, REG_RTC_CTRL_REG);
466 if (ret < 0)
467 goto out2;
468 }
469
470 /* init cached IRQ enable bits */
471 ret = twl4030_rtc_read_u8(&rtc_irq_bits, REG_RTC_INTERRUPTS_REG);
472 if (ret < 0)
473 goto out2;
474
475 return ret;
476
477
478out2:
479 free_irq(irq, rtc);
480out1:
481 rtc_device_unregister(rtc);
482out0:
483 return ret;
484}
485
486/*
487 * Disable all TWL4030 RTC module interrupts.
488 * Sets status flag to free.
489 */
490static int __devexit twl4030_rtc_remove(struct platform_device *pdev)
491{
492 /* leave rtc running, but disable irqs */
493 struct rtc_device *rtc = platform_get_drvdata(pdev);
494 int irq = platform_get_irq(pdev, 0);
495
496 mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_ALARM_M);
497 mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M);
498
499 free_irq(irq, rtc);
500
501 rtc_device_unregister(rtc);
502 platform_set_drvdata(pdev, NULL);
503 return 0;
504}
505
506static void twl4030_rtc_shutdown(struct platform_device *pdev)
507{
508 mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M |
509 BIT_RTC_INTERRUPTS_REG_IT_ALARM_M);
510}
511
512#ifdef CONFIG_PM
513
514static unsigned char irqstat;
515
516static int twl4030_rtc_suspend(struct platform_device *pdev, pm_message_t state)
517{
518 irqstat = rtc_irq_bits;
519
520 /* REVISIT alarm may need to wake us from sleep */
521 mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M |
522 BIT_RTC_INTERRUPTS_REG_IT_ALARM_M);
523 return 0;
524}
525
526static int twl4030_rtc_resume(struct platform_device *pdev)
527{
528 set_rtc_irq_bit(irqstat);
529 return 0;
530}
531
532#else
533#define twl4030_rtc_suspend NULL
534#define twl4030_rtc_resume NULL
535#endif
536
537MODULE_ALIAS("platform:twl4030_rtc");
538
539static struct platform_driver twl4030rtc_driver = {
540 .probe = twl4030_rtc_probe,
541 .remove = __devexit_p(twl4030_rtc_remove),
542 .shutdown = twl4030_rtc_shutdown,
543 .suspend = twl4030_rtc_suspend,
544 .resume = twl4030_rtc_resume,
545 .driver = {
546 .owner = THIS_MODULE,
547 .name = "twl4030_rtc",
548 },
549};
550
551static int __init twl4030_rtc_init(void)
552{
553 return platform_driver_register(&twl4030rtc_driver);
554}
555module_init(twl4030_rtc_init);
556
557static void __exit twl4030_rtc_exit(void)
558{
559 platform_driver_unregister(&twl4030rtc_driver);
560}
561module_exit(twl4030_rtc_exit);
562
563MODULE_AUTHOR("Texas Instruments, MontaVista Software");
564MODULE_LICENSE("GPL");
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c
index c014ffb110e9..5450a0e5ecdb 100644
--- a/drivers/serial/8250_pci.c
+++ b/drivers/serial/8250_pci.c
@@ -1100,6 +1100,8 @@ enum pci_board_num_t {
1100 pbn_b0_4_1843200_200, 1100 pbn_b0_4_1843200_200,
1101 pbn_b0_8_1843200_200, 1101 pbn_b0_8_1843200_200,
1102 1102
1103 pbn_b0_1_4000000,
1104
1103 pbn_b0_bt_1_115200, 1105 pbn_b0_bt_1_115200,
1104 pbn_b0_bt_2_115200, 1106 pbn_b0_bt_2_115200,
1105 pbn_b0_bt_8_115200, 1107 pbn_b0_bt_8_115200,
@@ -1167,6 +1169,10 @@ enum pci_board_num_t {
1167 pbn_exsys_4055, 1169 pbn_exsys_4055,
1168 pbn_plx_romulus, 1170 pbn_plx_romulus,
1169 pbn_oxsemi, 1171 pbn_oxsemi,
1172 pbn_oxsemi_1_4000000,
1173 pbn_oxsemi_2_4000000,
1174 pbn_oxsemi_4_4000000,
1175 pbn_oxsemi_8_4000000,
1170 pbn_intel_i960, 1176 pbn_intel_i960,
1171 pbn_sgi_ioc3, 1177 pbn_sgi_ioc3,
1172 pbn_computone_4, 1178 pbn_computone_4,
@@ -1290,6 +1296,12 @@ static struct pciserial_board pci_boards[] __devinitdata = {
1290 .base_baud = 1843200, 1296 .base_baud = 1843200,
1291 .uart_offset = 0x200, 1297 .uart_offset = 0x200,
1292 }, 1298 },
1299 [pbn_b0_1_4000000] = {
1300 .flags = FL_BASE0,
1301 .num_ports = 1,
1302 .base_baud = 4000000,
1303 .uart_offset = 8,
1304 },
1293 1305
1294 [pbn_b0_bt_1_115200] = { 1306 [pbn_b0_bt_1_115200] = {
1295 .flags = FL_BASE0|FL_BASE_BARS, 1307 .flags = FL_BASE0|FL_BASE_BARS,
@@ -1625,6 +1637,35 @@ static struct pciserial_board pci_boards[] __devinitdata = {
1625 .base_baud = 115200, 1637 .base_baud = 115200,
1626 .uart_offset = 8, 1638 .uart_offset = 8,
1627 }, 1639 },
1640 [pbn_oxsemi_1_4000000] = {
1641 .flags = FL_BASE0,
1642 .num_ports = 1,
1643 .base_baud = 4000000,
1644 .uart_offset = 0x200,
1645 .first_offset = 0x1000,
1646 },
1647 [pbn_oxsemi_2_4000000] = {
1648 .flags = FL_BASE0,
1649 .num_ports = 2,
1650 .base_baud = 4000000,
1651 .uart_offset = 0x200,
1652 .first_offset = 0x1000,
1653 },
1654 [pbn_oxsemi_4_4000000] = {
1655 .flags = FL_BASE0,
1656 .num_ports = 4,
1657 .base_baud = 4000000,
1658 .uart_offset = 0x200,
1659 .first_offset = 0x1000,
1660 },
1661 [pbn_oxsemi_8_4000000] = {
1662 .flags = FL_BASE0,
1663 .num_ports = 8,
1664 .base_baud = 4000000,
1665 .uart_offset = 0x200,
1666 .first_offset = 0x1000,
1667 },
1668
1628 1669
1629 /* 1670 /*
1630 * EKF addition for i960 Boards form EKF with serial port. 1671 * EKF addition for i960 Boards form EKF with serial port.
@@ -1813,6 +1854,39 @@ serial_pci_matches(struct pciserial_board *board,
1813 board->first_offset == guessed->first_offset; 1854 board->first_offset == guessed->first_offset;
1814} 1855}
1815 1856
1857/*
1858 * Oxford Semiconductor Inc.
1859 * Check that device is part of the Tornado range of devices, then determine
1860 * the number of ports available on the device.
1861 */
1862static int pci_oxsemi_tornado_init(struct pci_dev *dev, struct pciserial_board *board)
1863{
1864 u8 __iomem *p;
1865 unsigned long deviceID;
1866 unsigned int number_uarts;
1867
1868 /* OxSemi Tornado devices are all 0xCxxx */
1869 if (dev->vendor == PCI_VENDOR_ID_OXSEMI &&
1870 (dev->device & 0xF000) != 0xC000)
1871 return 0;
1872
1873 p = pci_iomap(dev, 0, 5);
1874 if (p == NULL)
1875 return -ENOMEM;
1876
1877 deviceID = ioread32(p);
1878 /* Tornado device */
1879 if (deviceID == 0x07000200) {
1880 number_uarts = ioread8(p + 4);
1881 board->num_ports = number_uarts;
1882 printk(KERN_DEBUG
1883 "%d ports detected on Oxford PCI Express device\n",
1884 number_uarts);
1885 }
1886 pci_iounmap(dev, p);
1887 return 0;
1888}
1889
1816struct serial_private * 1890struct serial_private *
1817pciserial_init_ports(struct pci_dev *dev, struct pciserial_board *board) 1891pciserial_init_ports(struct pci_dev *dev, struct pciserial_board *board)
1818{ 1892{
@@ -1821,6 +1895,13 @@ pciserial_init_ports(struct pci_dev *dev, struct pciserial_board *board)
1821 struct pci_serial_quirk *quirk; 1895 struct pci_serial_quirk *quirk;
1822 int rc, nr_ports, i; 1896 int rc, nr_ports, i;
1823 1897
1898 /*
1899 * Find number of ports on board
1900 */
1901 if (dev->vendor == PCI_VENDOR_ID_OXSEMI ||
1902 dev->vendor == PCI_VENDOR_ID_MAINPINE)
1903 pci_oxsemi_tornado_init(dev, board);
1904
1824 nr_ports = board->num_ports; 1905 nr_ports = board->num_ports;
1825 1906
1826 /* 1907 /*
@@ -2301,6 +2382,156 @@ static struct pci_device_id serial_pci_tbl[] = {
2301 pbn_b0_bt_2_921600 }, 2382 pbn_b0_bt_2_921600 },
2302 2383
2303 /* 2384 /*
2385 * Oxford Semiconductor Inc. Tornado PCI express device range.
2386 */
2387 { PCI_VENDOR_ID_OXSEMI, 0xc101, /* OXPCIe952 1 Legacy UART */
2388 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2389 pbn_b0_1_4000000 },
2390 { PCI_VENDOR_ID_OXSEMI, 0xc105, /* OXPCIe952 1 Legacy UART */
2391 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2392 pbn_b0_1_4000000 },
2393 { PCI_VENDOR_ID_OXSEMI, 0xc11b, /* OXPCIe952 1 Native UART */
2394 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2395 pbn_oxsemi_1_4000000 },
2396 { PCI_VENDOR_ID_OXSEMI, 0xc11f, /* OXPCIe952 1 Native UART */
2397 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2398 pbn_oxsemi_1_4000000 },
2399 { PCI_VENDOR_ID_OXSEMI, 0xc120, /* OXPCIe952 1 Legacy UART */
2400 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2401 pbn_b0_1_4000000 },
2402 { PCI_VENDOR_ID_OXSEMI, 0xc124, /* OXPCIe952 1 Legacy UART */
2403 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2404 pbn_b0_1_4000000 },
2405 { PCI_VENDOR_ID_OXSEMI, 0xc138, /* OXPCIe952 1 Native UART */
2406 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2407 pbn_oxsemi_1_4000000 },
2408 { PCI_VENDOR_ID_OXSEMI, 0xc13d, /* OXPCIe952 1 Native UART */
2409 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2410 pbn_oxsemi_1_4000000 },
2411 { PCI_VENDOR_ID_OXSEMI, 0xc140, /* OXPCIe952 1 Legacy UART */
2412 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2413 pbn_b0_1_4000000 },
2414 { PCI_VENDOR_ID_OXSEMI, 0xc141, /* OXPCIe952 1 Legacy UART */
2415 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2416 pbn_b0_1_4000000 },
2417 { PCI_VENDOR_ID_OXSEMI, 0xc144, /* OXPCIe952 1 Legacy UART */
2418 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2419 pbn_b0_1_4000000 },
2420 { PCI_VENDOR_ID_OXSEMI, 0xc145, /* OXPCIe952 1 Legacy UART */
2421 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2422 pbn_b0_1_4000000 },
2423 { PCI_VENDOR_ID_OXSEMI, 0xc158, /* OXPCIe952 2 Native UART */
2424 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2425 pbn_oxsemi_2_4000000 },
2426 { PCI_VENDOR_ID_OXSEMI, 0xc15d, /* OXPCIe952 2 Native UART */
2427 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2428 pbn_oxsemi_2_4000000 },
2429 { PCI_VENDOR_ID_OXSEMI, 0xc208, /* OXPCIe954 4 Native UART */
2430 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2431 pbn_oxsemi_4_4000000 },
2432 { PCI_VENDOR_ID_OXSEMI, 0xc20d, /* OXPCIe954 4 Native UART */
2433 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2434 pbn_oxsemi_4_4000000 },
2435 { PCI_VENDOR_ID_OXSEMI, 0xc308, /* OXPCIe958 8 Native UART */
2436 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2437 pbn_oxsemi_8_4000000 },
2438 { PCI_VENDOR_ID_OXSEMI, 0xc30d, /* OXPCIe958 8 Native UART */
2439 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2440 pbn_oxsemi_8_4000000 },
2441 { PCI_VENDOR_ID_OXSEMI, 0xc40b, /* OXPCIe200 1 Native UART */
2442 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2443 pbn_oxsemi_1_4000000 },
2444 { PCI_VENDOR_ID_OXSEMI, 0xc40f, /* OXPCIe200 1 Native UART */
2445 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2446 pbn_oxsemi_1_4000000 },
2447 { PCI_VENDOR_ID_OXSEMI, 0xc41b, /* OXPCIe200 1 Native UART */
2448 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2449 pbn_oxsemi_1_4000000 },
2450 { PCI_VENDOR_ID_OXSEMI, 0xc41f, /* OXPCIe200 1 Native UART */
2451 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2452 pbn_oxsemi_1_4000000 },
2453 { PCI_VENDOR_ID_OXSEMI, 0xc42b, /* OXPCIe200 1 Native UART */
2454 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2455 pbn_oxsemi_1_4000000 },
2456 { PCI_VENDOR_ID_OXSEMI, 0xc42f, /* OXPCIe200 1 Native UART */
2457 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2458 pbn_oxsemi_1_4000000 },
2459 { PCI_VENDOR_ID_OXSEMI, 0xc43b, /* OXPCIe200 1 Native UART */
2460 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2461 pbn_oxsemi_1_4000000 },
2462 { PCI_VENDOR_ID_OXSEMI, 0xc43f, /* OXPCIe200 1 Native UART */
2463 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2464 pbn_oxsemi_1_4000000 },
2465 { PCI_VENDOR_ID_OXSEMI, 0xc44b, /* OXPCIe200 1 Native UART */
2466 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2467 pbn_oxsemi_1_4000000 },
2468 { PCI_VENDOR_ID_OXSEMI, 0xc44f, /* OXPCIe200 1 Native UART */
2469 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2470 pbn_oxsemi_1_4000000 },
2471 { PCI_VENDOR_ID_OXSEMI, 0xc45b, /* OXPCIe200 1 Native UART */
2472 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2473 pbn_oxsemi_1_4000000 },
2474 { PCI_VENDOR_ID_OXSEMI, 0xc45f, /* OXPCIe200 1 Native UART */
2475 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2476 pbn_oxsemi_1_4000000 },
2477 { PCI_VENDOR_ID_OXSEMI, 0xc46b, /* OXPCIe200 1 Native UART */
2478 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2479 pbn_oxsemi_1_4000000 },
2480 { PCI_VENDOR_ID_OXSEMI, 0xc46f, /* OXPCIe200 1 Native UART */
2481 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2482 pbn_oxsemi_1_4000000 },
2483 { PCI_VENDOR_ID_OXSEMI, 0xc47b, /* OXPCIe200 1 Native UART */
2484 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2485 pbn_oxsemi_1_4000000 },
2486 { PCI_VENDOR_ID_OXSEMI, 0xc47f, /* OXPCIe200 1 Native UART */
2487 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2488 pbn_oxsemi_1_4000000 },
2489 { PCI_VENDOR_ID_OXSEMI, 0xc48b, /* OXPCIe200 1 Native UART */
2490 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2491 pbn_oxsemi_1_4000000 },
2492 { PCI_VENDOR_ID_OXSEMI, 0xc48f, /* OXPCIe200 1 Native UART */
2493 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2494 pbn_oxsemi_1_4000000 },
2495 { PCI_VENDOR_ID_OXSEMI, 0xc49b, /* OXPCIe200 1 Native UART */
2496 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2497 pbn_oxsemi_1_4000000 },
2498 { PCI_VENDOR_ID_OXSEMI, 0xc49f, /* OXPCIe200 1 Native UART */
2499 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2500 pbn_oxsemi_1_4000000 },
2501 { PCI_VENDOR_ID_OXSEMI, 0xc4ab, /* OXPCIe200 1 Native UART */
2502 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2503 pbn_oxsemi_1_4000000 },
2504 { PCI_VENDOR_ID_OXSEMI, 0xc4af, /* OXPCIe200 1 Native UART */
2505 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2506 pbn_oxsemi_1_4000000 },
2507 { PCI_VENDOR_ID_OXSEMI, 0xc4bb, /* OXPCIe200 1 Native UART */
2508 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2509 pbn_oxsemi_1_4000000 },
2510 { PCI_VENDOR_ID_OXSEMI, 0xc4bf, /* OXPCIe200 1 Native UART */
2511 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2512 pbn_oxsemi_1_4000000 },
2513 { PCI_VENDOR_ID_OXSEMI, 0xc4cb, /* OXPCIe200 1 Native UART */
2514 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2515 pbn_oxsemi_1_4000000 },
2516 { PCI_VENDOR_ID_OXSEMI, 0xc4cf, /* OXPCIe200 1 Native UART */
2517 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2518 pbn_oxsemi_1_4000000 },
2519 /*
2520 * Mainpine Inc. IQ Express "Rev3" utilizing OxSemi Tornado
2521 */
2522 { PCI_VENDOR_ID_MAINPINE, 0x4000, /* IQ Express 1 Port V.34 Super-G3 Fax */
2523 PCI_VENDOR_ID_MAINPINE, 0x4001, 0, 0,
2524 pbn_oxsemi_1_4000000 },
2525 { PCI_VENDOR_ID_MAINPINE, 0x4000, /* IQ Express 2 Port V.34 Super-G3 Fax */
2526 PCI_VENDOR_ID_MAINPINE, 0x4002, 0, 0,
2527 pbn_oxsemi_2_4000000 },
2528 { PCI_VENDOR_ID_MAINPINE, 0x4000, /* IQ Express 4 Port V.34 Super-G3 Fax */
2529 PCI_VENDOR_ID_MAINPINE, 0x4004, 0, 0,
2530 pbn_oxsemi_4_4000000 },
2531 { PCI_VENDOR_ID_MAINPINE, 0x4000, /* IQ Express 8 Port V.34 Super-G3 Fax */
2532 PCI_VENDOR_ID_MAINPINE, 0x4008, 0, 0,
2533 pbn_oxsemi_8_4000000 },
2534 /*
2304 * SBS Technologies, Inc. P-Octal and PMC-OCTPRO cards, 2535 * SBS Technologies, Inc. P-Octal and PMC-OCTPRO cards,
2305 * from skokodyn@yahoo.com 2536 * from skokodyn@yahoo.com
2306 */ 2537 */
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index db783b77a881..c94d3c4b7521 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -457,7 +457,7 @@ config SERIAL_SAMSUNG
457 457
458config SERIAL_SAMSUNG_DEBUG 458config SERIAL_SAMSUNG_DEBUG
459 bool "Samsung SoC serial debug" 459 bool "Samsung SoC serial debug"
460 depends on SERIAL_SAMSUNG 460 depends on SERIAL_SAMSUNG && DEBUG_LL
461 help 461 help
462 Add support for debugging the serial driver. Since this is 462 Add support for debugging the serial driver. Since this is
463 generally being used as a console, we use our own output 463 generally being used as a console, we use our own output
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 2a79decd7dfc..c4eff44c9f27 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -43,4 +43,8 @@ source "drivers/staging/echo/Kconfig"
43 43
44source "drivers/staging/at76_usb/Kconfig" 44source "drivers/staging/at76_usb/Kconfig"
45 45
46source "drivers/staging/pcc-acpi/Kconfig"
47
48source "drivers/staging/poch/Kconfig"
49
46endif # STAGING 50endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 325bca4f71c0..7cb8701d96d4 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -13,3 +13,5 @@ obj-$(CONFIG_W35UND) += winbond/
13obj-$(CONFIG_PRISM2_USB) += wlan-ng/ 13obj-$(CONFIG_PRISM2_USB) += wlan-ng/
14obj-$(CONFIG_ECHO) += echo/ 14obj-$(CONFIG_ECHO) += echo/
15obj-$(CONFIG_USB_ATMEL) += at76_usb/ 15obj-$(CONFIG_USB_ATMEL) += at76_usb/
16obj-$(CONFIG_PCC_ACPI) += pcc-acpi/
17obj-$(CONFIG_POCH) += poch/
diff --git a/drivers/staging/at76_usb/at76_usb.c b/drivers/staging/at76_usb/at76_usb.c
index 52df0c665183..174e2bec9223 100644
--- a/drivers/staging/at76_usb/at76_usb.c
+++ b/drivers/staging/at76_usb/at76_usb.c
@@ -2319,9 +2319,11 @@ static int at76_iw_handler_get_scan(struct net_device *netdev,
2319 if (!iwe) 2319 if (!iwe)
2320 return -ENOMEM; 2320 return -ENOMEM;
2321 2321
2322 if (priv->scan_state != SCAN_COMPLETED) 2322 if (priv->scan_state != SCAN_COMPLETED) {
2323 /* scan not yet finished */ 2323 /* scan not yet finished */
2324 kfree(iwe);
2324 return -EAGAIN; 2325 return -EAGAIN;
2326 }
2325 2327
2326 spin_lock_irqsave(&priv->bss_list_spinlock, flags); 2328 spin_lock_irqsave(&priv->bss_list_spinlock, flags);
2327 2329
diff --git a/drivers/staging/echo/bit_operations.h b/drivers/staging/echo/bit_operations.h
index b32f4bf99397..cecdcf3fd755 100644
--- a/drivers/staging/echo/bit_operations.h
+++ b/drivers/staging/echo/bit_operations.h
@@ -30,114 +30,98 @@
30#if !defined(_BIT_OPERATIONS_H_) 30#if !defined(_BIT_OPERATIONS_H_)
31#define _BIT_OPERATIONS_H_ 31#define _BIT_OPERATIONS_H_
32 32
33#ifdef __cplusplus
34extern "C" {
35#endif
36
37#if defined(__i386__) || defined(__x86_64__) 33#if defined(__i386__) || defined(__x86_64__)
38/*! \brief Find the bit position of the highest set bit in a word 34/*! \brief Find the bit position of the highest set bit in a word
39 \param bits The word to be searched 35 \param bits The word to be searched
40 \return The bit number of the highest set bit, or -1 if the word is zero. */ 36 \return The bit number of the highest set bit, or -1 if the word is zero. */
41static __inline__ int top_bit(unsigned int bits) 37static __inline__ int top_bit(unsigned int bits)
42{ 38{
43 int res; 39 int res;
44 40
45 __asm__ (" xorl %[res],%[res];\n" 41 __asm__(" xorl %[res],%[res];\n"
46 " decl %[res];\n" 42 " decl %[res];\n"
47 " bsrl %[bits],%[res]\n" 43 " bsrl %[bits],%[res]\n"
48 : [res] "=&r" (res) 44 :[res] "=&r" (res)
49 : [bits] "rm" (bits)); 45 :[bits] "rm"(bits)
50 return res; 46 );
47 return res;
51} 48}
52/*- End of function --------------------------------------------------------*/
53 49
54/*! \brief Find the bit position of the lowest set bit in a word 50/*! \brief Find the bit position of the lowest set bit in a word
55 \param bits The word to be searched 51 \param bits The word to be searched
56 \return The bit number of the lowest set bit, or -1 if the word is zero. */ 52 \return The bit number of the lowest set bit, or -1 if the word is zero. */
57static __inline__ int bottom_bit(unsigned int bits) 53static __inline__ int bottom_bit(unsigned int bits)
58{ 54{
59 int res; 55 int res;
60 56
61 __asm__ (" xorl %[res],%[res];\n" 57 __asm__(" xorl %[res],%[res];\n"
62 " decl %[res];\n" 58 " decl %[res];\n"
63 " bsfl %[bits],%[res]\n" 59 " bsfl %[bits],%[res]\n"
64 : [res] "=&r" (res) 60 :[res] "=&r" (res)
65 : [bits] "rm" (bits)); 61 :[bits] "rm"(bits)
66 return res; 62 );
63 return res;
67} 64}
68/*- End of function --------------------------------------------------------*/
69#else 65#else
70static __inline__ int top_bit(unsigned int bits) 66static __inline__ int top_bit(unsigned int bits)
71{ 67{
72 int i; 68 int i;
73 69
74 if (bits == 0) 70 if (bits == 0)
75 return -1; 71 return -1;
76 i = 0; 72 i = 0;
77 if (bits & 0xFFFF0000) 73 if (bits & 0xFFFF0000) {
78 { 74 bits &= 0xFFFF0000;
79 bits &= 0xFFFF0000; 75 i += 16;
80 i += 16; 76 }
81 } 77 if (bits & 0xFF00FF00) {
82 if (bits & 0xFF00FF00) 78 bits &= 0xFF00FF00;
83 { 79 i += 8;
84 bits &= 0xFF00FF00; 80 }
85 i += 8; 81 if (bits & 0xF0F0F0F0) {
86 } 82 bits &= 0xF0F0F0F0;
87 if (bits & 0xF0F0F0F0) 83 i += 4;
88 { 84 }
89 bits &= 0xF0F0F0F0; 85 if (bits & 0xCCCCCCCC) {
90 i += 4; 86 bits &= 0xCCCCCCCC;
91 } 87 i += 2;
92 if (bits & 0xCCCCCCCC) 88 }
93 { 89 if (bits & 0xAAAAAAAA) {
94 bits &= 0xCCCCCCCC; 90 bits &= 0xAAAAAAAA;
95 i += 2; 91 i += 1;
96 } 92 }
97 if (bits & 0xAAAAAAAA) 93 return i;
98 {
99 bits &= 0xAAAAAAAA;
100 i += 1;
101 }
102 return i;
103} 94}
104/*- End of function --------------------------------------------------------*/
105 95
106static __inline__ int bottom_bit(unsigned int bits) 96static __inline__ int bottom_bit(unsigned int bits)
107{ 97{
108 int i; 98 int i;
109 99
110 if (bits == 0) 100 if (bits == 0)
111 return -1; 101 return -1;
112 i = 32; 102 i = 32;
113 if (bits & 0x0000FFFF) 103 if (bits & 0x0000FFFF) {
114 { 104 bits &= 0x0000FFFF;
115 bits &= 0x0000FFFF; 105 i -= 16;
116 i -= 16; 106 }
117 } 107 if (bits & 0x00FF00FF) {
118 if (bits & 0x00FF00FF) 108 bits &= 0x00FF00FF;
119 { 109 i -= 8;
120 bits &= 0x00FF00FF; 110 }
121 i -= 8; 111 if (bits & 0x0F0F0F0F) {
122 } 112 bits &= 0x0F0F0F0F;
123 if (bits & 0x0F0F0F0F) 113 i -= 4;
124 { 114 }
125 bits &= 0x0F0F0F0F; 115 if (bits & 0x33333333) {
126 i -= 4; 116 bits &= 0x33333333;
127 } 117 i -= 2;
128 if (bits & 0x33333333) 118 }
129 { 119 if (bits & 0x55555555) {
130 bits &= 0x33333333; 120 bits &= 0x55555555;
131 i -= 2; 121 i -= 1;
132 } 122 }
133 if (bits & 0x55555555) 123 return i;
134 {
135 bits &= 0x55555555;
136 i -= 1;
137 }
138 return i;
139} 124}
140/*- End of function --------------------------------------------------------*/
141#endif 125#endif
142 126
143/*! \brief Bit reverse a byte. 127/*! \brief Bit reverse a byte.
@@ -146,16 +130,16 @@ static __inline__ int bottom_bit(unsigned int bits)
146static __inline__ uint8_t bit_reverse8(uint8_t x) 130static __inline__ uint8_t bit_reverse8(uint8_t x)
147{ 131{
148#if defined(__i386__) || defined(__x86_64__) 132#if defined(__i386__) || defined(__x86_64__)
149 /* If multiply is fast */ 133 /* If multiply is fast */
150 return ((x*0x0802U & 0x22110U) | (x*0x8020U & 0x88440U))*0x10101U >> 16; 134 return ((x * 0x0802U & 0x22110U) | (x * 0x8020U & 0x88440U)) *
135 0x10101U >> 16;
151#else 136#else
152 /* If multiply is slow, but we have a barrel shifter */ 137 /* If multiply is slow, but we have a barrel shifter */
153 x = (x >> 4) | (x << 4); 138 x = (x >> 4) | (x << 4);
154 x = ((x & 0xCC) >> 2) | ((x & 0x33) << 2); 139 x = ((x & 0xCC) >> 2) | ((x & 0x33) << 2);
155 return ((x & 0xAA) >> 1) | ((x & 0x55) << 1); 140 return ((x & 0xAA) >> 1) | ((x & 0x55) << 1);
156#endif 141#endif
157} 142}
158/*- End of function --------------------------------------------------------*/
159 143
160/*! \brief Bit reverse a 16 bit word. 144/*! \brief Bit reverse a 16 bit word.
161 \param data The word to be reversed. 145 \param data The word to be reversed.
@@ -193,9 +177,8 @@ uint16_t make_mask16(uint16_t x);
193 \return The word with the single set bit. */ 177 \return The word with the single set bit. */
194static __inline__ uint32_t least_significant_one32(uint32_t x) 178static __inline__ uint32_t least_significant_one32(uint32_t x)
195{ 179{
196 return (x & (-(int32_t) x)); 180 return (x & (-(int32_t) x));
197} 181}
198/*- End of function --------------------------------------------------------*/
199 182
200/*! \brief Find the most significant one in a word, and return a word 183/*! \brief Find the most significant one in a word, and return a word
201 with just that bit set. 184 with just that bit set.
@@ -204,50 +187,42 @@ static __inline__ uint32_t least_significant_one32(uint32_t x)
204static __inline__ uint32_t most_significant_one32(uint32_t x) 187static __inline__ uint32_t most_significant_one32(uint32_t x)
205{ 188{
206#if defined(__i386__) || defined(__x86_64__) 189#if defined(__i386__) || defined(__x86_64__)
207 return 1 << top_bit(x); 190 return 1 << top_bit(x);
208#else 191#else
209 x = make_mask32(x); 192 x = make_mask32(x);
210 return (x ^ (x >> 1)); 193 return (x ^ (x >> 1));
211#endif 194#endif
212} 195}
213/*- End of function --------------------------------------------------------*/
214 196
215/*! \brief Find the parity of a byte. 197/*! \brief Find the parity of a byte.
216 \param x The byte to be checked. 198 \param x The byte to be checked.
217 \return 1 for odd, or 0 for even. */ 199 \return 1 for odd, or 0 for even. */
218static __inline__ int parity8(uint8_t x) 200static __inline__ int parity8(uint8_t x)
219{ 201{
220 x = (x ^ (x >> 4)) & 0x0F; 202 x = (x ^ (x >> 4)) & 0x0F;
221 return (0x6996 >> x) & 1; 203 return (0x6996 >> x) & 1;
222} 204}
223/*- End of function --------------------------------------------------------*/
224 205
225/*! \brief Find the parity of a 16 bit word. 206/*! \brief Find the parity of a 16 bit word.
226 \param x The word to be checked. 207 \param x The word to be checked.
227 \return 1 for odd, or 0 for even. */ 208 \return 1 for odd, or 0 for even. */
228static __inline__ int parity16(uint16_t x) 209static __inline__ int parity16(uint16_t x)
229{ 210{
230 x ^= (x >> 8); 211 x ^= (x >> 8);
231 x = (x ^ (x >> 4)) & 0x0F; 212 x = (x ^ (x >> 4)) & 0x0F;
232 return (0x6996 >> x) & 1; 213 return (0x6996 >> x) & 1;
233} 214}
234/*- End of function --------------------------------------------------------*/
235 215
236/*! \brief Find the parity of a 32 bit word. 216/*! \brief Find the parity of a 32 bit word.
237 \param x The word to be checked. 217 \param x The word to be checked.
238 \return 1 for odd, or 0 for even. */ 218 \return 1 for odd, or 0 for even. */
239static __inline__ int parity32(uint32_t x) 219static __inline__ int parity32(uint32_t x)
240{ 220{
241 x ^= (x >> 16); 221 x ^= (x >> 16);
242 x ^= (x >> 8); 222 x ^= (x >> 8);
243 x = (x ^ (x >> 4)) & 0x0F; 223 x = (x ^ (x >> 4)) & 0x0F;
244 return (0x6996 >> x) & 1; 224 return (0x6996 >> x) & 1;
245} 225}
246/*- End of function --------------------------------------------------------*/
247
248#ifdef __cplusplus
249}
250#endif
251 226
252#endif 227#endif
253/*- End of file ------------------------------------------------------------*/ 228/*- End of file ------------------------------------------------------------*/
diff --git a/drivers/staging/echo/echo.c b/drivers/staging/echo/echo.c
index 4a281b14fc58..b8f2c5e9dee5 100644
--- a/drivers/staging/echo/echo.c
+++ b/drivers/staging/echo/echo.c
@@ -74,7 +74,6 @@
74 74
75 Steve also has some nice notes on echo cancellers in echo.h 75 Steve also has some nice notes on echo cancellers in echo.h
76 76
77
78 References: 77 References:
79 78
80 [1] Ochiai, Areseki, and Ogihara, "Echo Canceller with Two Echo 79 [1] Ochiai, Areseki, and Ogihara, "Echo Canceller with Two Echo
@@ -105,20 +104,18 @@
105 Mark, Pawel, and Pavel. 104 Mark, Pawel, and Pavel.
106*/ 105*/
107 106
108#include <linux/kernel.h> /* We're doing kernel work */ 107#include <linux/kernel.h> /* We're doing kernel work */
109#include <linux/module.h> 108#include <linux/module.h>
110#include <linux/kernel.h> 109#include <linux/kernel.h>
111#include <linux/slab.h> 110#include <linux/slab.h>
112#define malloc(a) kmalloc((a), GFP_KERNEL)
113#define free(a) kfree(a)
114 111
115#include "bit_operations.h" 112#include "bit_operations.h"
116#include "echo.h" 113#include "echo.h"
117 114
118#define MIN_TX_POWER_FOR_ADAPTION 64 115#define MIN_TX_POWER_FOR_ADAPTION 64
119#define MIN_RX_POWER_FOR_ADAPTION 64 116#define MIN_RX_POWER_FOR_ADAPTION 64
120#define DTD_HANGOVER 600 /* 600 samples, or 75ms */ 117#define DTD_HANGOVER 600 /* 600 samples, or 75ms */
121#define DC_LOG2BETA 3 /* log2() of DC filter Beta */ 118#define DC_LOG2BETA 3 /* log2() of DC filter Beta */
122 119
123/*-----------------------------------------------------------------------*\ 120/*-----------------------------------------------------------------------*\
124 FUNCTIONS 121 FUNCTIONS
@@ -126,59 +123,58 @@
126 123
127/* adapting coeffs using the traditional stochastic descent (N)LMS algorithm */ 124/* adapting coeffs using the traditional stochastic descent (N)LMS algorithm */
128 125
129 126#ifdef __bfin__
130#ifdef __BLACKFIN_ASM__ 127static void __inline__ lms_adapt_bg(struct oslec_state *ec, int clean,
131static void __inline__ lms_adapt_bg(echo_can_state_t *ec, int clean, int shift) 128 int shift)
132{ 129{
133 int i, j; 130 int i, j;
134 int offset1; 131 int offset1;
135 int offset2; 132 int offset2;
136 int factor; 133 int factor;
137 int exp; 134 int exp;
138 int16_t *phist; 135 int16_t *phist;
139 int n; 136 int n;
140 137
141 if (shift > 0) 138 if (shift > 0)
142 factor = clean << shift; 139 factor = clean << shift;
143 else 140 else
144 factor = clean >> -shift; 141 factor = clean >> -shift;
145 142
146 /* Update the FIR taps */ 143 /* Update the FIR taps */
147 144
148 offset2 = ec->curr_pos; 145 offset2 = ec->curr_pos;
149 offset1 = ec->taps - offset2; 146 offset1 = ec->taps - offset2;
150 phist = &ec->fir_state_bg.history[offset2]; 147 phist = &ec->fir_state_bg.history[offset2];
151 148
152 /* st: and en: help us locate the assembler in echo.s */ 149 /* st: and en: help us locate the assembler in echo.s */
153 150
154 //asm("st:"); 151 //asm("st:");
155 n = ec->taps; 152 n = ec->taps;
156 for (i = 0, j = offset2; i < n; i++, j++) 153 for (i = 0, j = offset2; i < n; i++, j++) {
157 { 154 exp = *phist++ * factor;
158 exp = *phist++ * factor; 155 ec->fir_taps16[1][i] += (int16_t) ((exp + (1 << 14)) >> 15);
159 ec->fir_taps16[1][i] += (int16_t) ((exp+(1<<14)) >> 15); 156 }
160 } 157 //asm("en:");
161 //asm("en:"); 158
162 159 /* Note the asm for the inner loop above generated by Blackfin gcc
163 /* Note the asm for the inner loop above generated by Blackfin gcc 160 4.1.1 is pretty good (note even parallel instructions used):
164 4.1.1 is pretty good (note even parallel instructions used): 161
165 162 R0 = W [P0++] (X);
166 R0 = W [P0++] (X); 163 R0 *= R2;
167 R0 *= R2; 164 R0 = R0 + R3 (NS) ||
168 R0 = R0 + R3 (NS) || 165 R1 = W [P1] (X) ||
169 R1 = W [P1] (X) || 166 nop;
170 nop; 167 R0 >>>= 15;
171 R0 >>>= 15; 168 R0 = R0 + R1;
172 R0 = R0 + R1; 169 W [P1++] = R0;
173 W [P1++] = R0; 170
174 171 A block based update algorithm would be much faster but the
175 A block based update algorithm would be much faster but the 172 above can't be improved on much. Every instruction saved in
176 above can't be improved on much. Every instruction saved in 173 the loop above is 2 MIPs/ch! The for loop above is where the
177 the loop above is 2 MIPs/ch! The for loop above is where the 174 Blackfin spends most of it's time - about 17 MIPs/ch measured
178 Blackfin spends most of it's time - about 17 MIPs/ch measured 175 with speedtest.c with 256 taps (32ms). Write-back and
179 with speedtest.c with 256 taps (32ms). Write-back and 176 Write-through cache gave about the same performance.
180 Write-through cache gave about the same performance. 177 */
181 */
182} 178}
183 179
184/* 180/*
@@ -200,392 +196,393 @@ static void __inline__ lms_adapt_bg(echo_can_state_t *ec, int clean, int shift)
200*/ 196*/
201 197
202#else 198#else
203static __inline__ void lms_adapt_bg(echo_can_state_t *ec, int clean, int shift) 199static __inline__ void lms_adapt_bg(struct oslec_state *ec, int clean,
200 int shift)
204{ 201{
205 int i; 202 int i;
206 203
207 int offset1; 204 int offset1;
208 int offset2; 205 int offset2;
209 int factor; 206 int factor;
210 int exp; 207 int exp;
211 208
212 if (shift > 0) 209 if (shift > 0)
213 factor = clean << shift; 210 factor = clean << shift;
214 else 211 else
215 factor = clean >> -shift; 212 factor = clean >> -shift;
216 213
217 /* Update the FIR taps */ 214 /* Update the FIR taps */
218 215
219 offset2 = ec->curr_pos; 216 offset2 = ec->curr_pos;
220 offset1 = ec->taps - offset2; 217 offset1 = ec->taps - offset2;
221 218
222 for (i = ec->taps - 1; i >= offset1; i--) 219 for (i = ec->taps - 1; i >= offset1; i--) {
223 { 220 exp = (ec->fir_state_bg.history[i - offset1] * factor);
224 exp = (ec->fir_state_bg.history[i - offset1]*factor); 221 ec->fir_taps16[1][i] += (int16_t) ((exp + (1 << 14)) >> 15);
225 ec->fir_taps16[1][i] += (int16_t) ((exp+(1<<14)) >> 15); 222 }
226 } 223 for (; i >= 0; i--) {
227 for ( ; i >= 0; i--) 224 exp = (ec->fir_state_bg.history[i + offset2] * factor);
228 { 225 ec->fir_taps16[1][i] += (int16_t) ((exp + (1 << 14)) >> 15);
229 exp = (ec->fir_state_bg.history[i + offset2]*factor); 226 }
230 ec->fir_taps16[1][i] += (int16_t) ((exp+(1<<14)) >> 15);
231 }
232} 227}
233#endif 228#endif
234 229
235/*- End of function --------------------------------------------------------*/ 230struct oslec_state *oslec_create(int len, int adaption_mode)
236
237echo_can_state_t *echo_can_create(int len, int adaption_mode)
238{ 231{
239 echo_can_state_t *ec; 232 struct oslec_state *ec;
240 int i; 233 int i;
241 int j; 234
242 235 ec = kzalloc(sizeof(*ec), GFP_KERNEL);
243 ec = kmalloc(sizeof(*ec), GFP_KERNEL); 236 if (!ec)
244 if (ec == NULL) 237 return NULL;
245 return NULL; 238
246 memset(ec, 0, sizeof(*ec)); 239 ec->taps = len;
247 240 ec->log2taps = top_bit(len);
248 ec->taps = len; 241 ec->curr_pos = ec->taps - 1;
249 ec->log2taps = top_bit(len); 242
250 ec->curr_pos = ec->taps - 1; 243 for (i = 0; i < 2; i++) {
251 244 ec->fir_taps16[i] =
252 for (i = 0; i < 2; i++) 245 kcalloc(ec->taps, sizeof(int16_t), GFP_KERNEL);
253 { 246 if (!ec->fir_taps16[i])
254 if ((ec->fir_taps16[i] = (int16_t *) malloc((ec->taps)*sizeof(int16_t))) == NULL) 247 goto error_oom;
255 { 248 }
256 for (j = 0; j < i; j++) 249
257 kfree(ec->fir_taps16[j]); 250 fir16_create(&ec->fir_state, ec->fir_taps16[0], ec->taps);
258 kfree(ec); 251 fir16_create(&ec->fir_state_bg, ec->fir_taps16[1], ec->taps);
259 return NULL; 252
260 } 253 for (i = 0; i < 5; i++) {
261 memset(ec->fir_taps16[i], 0, (ec->taps)*sizeof(int16_t)); 254 ec->xvtx[i] = ec->yvtx[i] = ec->xvrx[i] = ec->yvrx[i] = 0;
262 } 255 }
263 256
264 fir16_create(&ec->fir_state, 257 ec->cng_level = 1000;
265 ec->fir_taps16[0], 258 oslec_adaption_mode(ec, adaption_mode);
266 ec->taps); 259
267 fir16_create(&ec->fir_state_bg, 260 ec->snapshot = kcalloc(ec->taps, sizeof(int16_t), GFP_KERNEL);
268 ec->fir_taps16[1], 261 if (!ec->snapshot)
269 ec->taps); 262 goto error_oom;
270 263
271 for(i=0; i<5; i++) { 264 ec->cond_met = 0;
272 ec->xvtx[i] = ec->yvtx[i] = ec->xvrx[i] = ec->yvrx[i] = 0; 265 ec->Pstates = 0;
273 } 266 ec->Ltxacc = ec->Lrxacc = ec->Lcleanacc = ec->Lclean_bgacc = 0;
274 267 ec->Ltx = ec->Lrx = ec->Lclean = ec->Lclean_bg = 0;
275 ec->cng_level = 1000; 268 ec->tx_1 = ec->tx_2 = ec->rx_1 = ec->rx_2 = 0;
276 echo_can_adaption_mode(ec, adaption_mode); 269 ec->Lbgn = ec->Lbgn_acc = 0;
277 270 ec->Lbgn_upper = 200;
278 ec->snapshot = (int16_t*)malloc(ec->taps*sizeof(int16_t)); 271 ec->Lbgn_upper_acc = ec->Lbgn_upper << 13;
279 memset(ec->snapshot, 0, sizeof(int16_t)*ec->taps); 272
280 273 return ec;
281 ec->cond_met = 0; 274
282 ec->Pstates = 0; 275 error_oom:
283 ec->Ltxacc = ec->Lrxacc = ec->Lcleanacc = ec->Lclean_bgacc = 0; 276 for (i = 0; i < 2; i++)
284 ec->Ltx = ec->Lrx = ec->Lclean = ec->Lclean_bg = 0; 277 kfree(ec->fir_taps16[i]);
285 ec->tx_1 = ec->tx_2 = ec->rx_1 = ec->rx_2 = 0; 278
286 ec->Lbgn = ec->Lbgn_acc = 0; 279 kfree(ec);
287 ec->Lbgn_upper = 200; 280 return NULL;
288 ec->Lbgn_upper_acc = ec->Lbgn_upper << 13;
289
290 return ec;
291} 281}
292/*- End of function --------------------------------------------------------*/
293 282
294void echo_can_free(echo_can_state_t *ec) 283EXPORT_SYMBOL_GPL(oslec_create);
284
285void oslec_free(struct oslec_state *ec)
295{ 286{
296 int i; 287 int i;
297 288
298 fir16_free(&ec->fir_state); 289 fir16_free(&ec->fir_state);
299 fir16_free(&ec->fir_state_bg); 290 fir16_free(&ec->fir_state_bg);
300 for (i = 0; i < 2; i++) 291 for (i = 0; i < 2; i++)
301 kfree(ec->fir_taps16[i]); 292 kfree(ec->fir_taps16[i]);
302 kfree(ec->snapshot); 293 kfree(ec->snapshot);
303 kfree(ec); 294 kfree(ec);
304} 295}
305/*- End of function --------------------------------------------------------*/
306 296
307void echo_can_adaption_mode(echo_can_state_t *ec, int adaption_mode) 297EXPORT_SYMBOL_GPL(oslec_free);
298
299void oslec_adaption_mode(struct oslec_state *ec, int adaption_mode)
308{ 300{
309 ec->adaption_mode = adaption_mode; 301 ec->adaption_mode = adaption_mode;
310} 302}
311/*- End of function --------------------------------------------------------*/
312 303
313void echo_can_flush(echo_can_state_t *ec) 304EXPORT_SYMBOL_GPL(oslec_adaption_mode);
305
306void oslec_flush(struct oslec_state *ec)
314{ 307{
315 int i; 308 int i;
316 309
317 ec->Ltxacc = ec->Lrxacc = ec->Lcleanacc = ec->Lclean_bgacc = 0; 310 ec->Ltxacc = ec->Lrxacc = ec->Lcleanacc = ec->Lclean_bgacc = 0;
318 ec->Ltx = ec->Lrx = ec->Lclean = ec->Lclean_bg = 0; 311 ec->Ltx = ec->Lrx = ec->Lclean = ec->Lclean_bg = 0;
319 ec->tx_1 = ec->tx_2 = ec->rx_1 = ec->rx_2 = 0; 312 ec->tx_1 = ec->tx_2 = ec->rx_1 = ec->rx_2 = 0;
320 313
321 ec->Lbgn = ec->Lbgn_acc = 0; 314 ec->Lbgn = ec->Lbgn_acc = 0;
322 ec->Lbgn_upper = 200; 315 ec->Lbgn_upper = 200;
323 ec->Lbgn_upper_acc = ec->Lbgn_upper << 13; 316 ec->Lbgn_upper_acc = ec->Lbgn_upper << 13;
324 317
325 ec->nonupdate_dwell = 0; 318 ec->nonupdate_dwell = 0;
326 319
327 fir16_flush(&ec->fir_state); 320 fir16_flush(&ec->fir_state);
328 fir16_flush(&ec->fir_state_bg); 321 fir16_flush(&ec->fir_state_bg);
329 ec->fir_state.curr_pos = ec->taps - 1; 322 ec->fir_state.curr_pos = ec->taps - 1;
330 ec->fir_state_bg.curr_pos = ec->taps - 1; 323 ec->fir_state_bg.curr_pos = ec->taps - 1;
331 for (i = 0; i < 2; i++) 324 for (i = 0; i < 2; i++)
332 memset(ec->fir_taps16[i], 0, ec->taps*sizeof(int16_t)); 325 memset(ec->fir_taps16[i], 0, ec->taps * sizeof(int16_t));
333 326
334 ec->curr_pos = ec->taps - 1; 327 ec->curr_pos = ec->taps - 1;
335 ec->Pstates = 0; 328 ec->Pstates = 0;
336} 329}
337/*- End of function --------------------------------------------------------*/
338 330
339void echo_can_snapshot(echo_can_state_t *ec) { 331EXPORT_SYMBOL_GPL(oslec_flush);
340 memcpy(ec->snapshot, ec->fir_taps16[0], ec->taps*sizeof(int16_t)); 332
333void oslec_snapshot(struct oslec_state *ec)
334{
335 memcpy(ec->snapshot, ec->fir_taps16[0], ec->taps * sizeof(int16_t));
341} 336}
342/*- End of function --------------------------------------------------------*/ 337
338EXPORT_SYMBOL_GPL(oslec_snapshot);
343 339
344/* Dual Path Echo Canceller ------------------------------------------------*/ 340/* Dual Path Echo Canceller ------------------------------------------------*/
345 341
346int16_t echo_can_update(echo_can_state_t *ec, int16_t tx, int16_t rx) 342int16_t oslec_update(struct oslec_state *ec, int16_t tx, int16_t rx)
347{ 343{
348 int32_t echo_value; 344 int32_t echo_value;
349 int clean_bg; 345 int clean_bg;
350 int tmp, tmp1; 346 int tmp, tmp1;
351 347
352 /* Input scaling was found be required to prevent problems when tx 348 /* Input scaling was found be required to prevent problems when tx
353 starts clipping. Another possible way to handle this would be the 349 starts clipping. Another possible way to handle this would be the
354 filter coefficent scaling. */ 350 filter coefficent scaling. */
355 351
356 ec->tx = tx; ec->rx = rx; 352 ec->tx = tx;
357 tx >>=1; 353 ec->rx = rx;
358 rx >>=1; 354 tx >>= 1;
359 355 rx >>= 1;
360 /* 356
361 Filter DC, 3dB point is 160Hz (I think), note 32 bit precision required 357 /*
362 otherwise values do not track down to 0. Zero at DC, Pole at (1-Beta) 358 Filter DC, 3dB point is 160Hz (I think), note 32 bit precision required
363 only real axis. Some chip sets (like Si labs) don't need 359 otherwise values do not track down to 0. Zero at DC, Pole at (1-Beta)
364 this, but something like a $10 X100P card does. Any DC really slows 360 only real axis. Some chip sets (like Si labs) don't need
365 down convergence. 361 this, but something like a $10 X100P card does. Any DC really slows
366 362 down convergence.
367 Note: removes some low frequency from the signal, this reduces 363
368 the speech quality when listening to samples through headphones 364 Note: removes some low frequency from the signal, this reduces
369 but may not be obvious through a telephone handset. 365 the speech quality when listening to samples through headphones
370 366 but may not be obvious through a telephone handset.
371 Note that the 3dB frequency in radians is approx Beta, e.g. for 367
372 Beta = 2^(-3) = 0.125, 3dB freq is 0.125 rads = 159Hz. 368 Note that the 3dB frequency in radians is approx Beta, e.g. for
373 */ 369 Beta = 2^(-3) = 0.125, 3dB freq is 0.125 rads = 159Hz.
374 370 */
375 if (ec->adaption_mode & ECHO_CAN_USE_RX_HPF) { 371
376 tmp = rx << 15; 372 if (ec->adaption_mode & ECHO_CAN_USE_RX_HPF) {
373 tmp = rx << 15;
377#if 1 374#if 1
378 /* Make sure the gain of the HPF is 1.0. This can still saturate a little under 375 /* Make sure the gain of the HPF is 1.0. This can still saturate a little under
379 impulse conditions, and it might roll to 32768 and need clipping on sustained peak 376 impulse conditions, and it might roll to 32768 and need clipping on sustained peak
380 level signals. However, the scale of such clipping is small, and the error due to 377 level signals. However, the scale of such clipping is small, and the error due to
381 any saturation should not markedly affect the downstream processing. */ 378 any saturation should not markedly affect the downstream processing. */
382 tmp -= (tmp >> 4); 379 tmp -= (tmp >> 4);
383#endif 380#endif
384 ec->rx_1 += -(ec->rx_1>>DC_LOG2BETA) + tmp - ec->rx_2; 381 ec->rx_1 += -(ec->rx_1 >> DC_LOG2BETA) + tmp - ec->rx_2;
382
383 /* hard limit filter to prevent clipping. Note that at this stage
384 rx should be limited to +/- 16383 due to right shift above */
385 tmp1 = ec->rx_1 >> 15;
386 if (tmp1 > 16383)
387 tmp1 = 16383;
388 if (tmp1 < -16383)
389 tmp1 = -16383;
390 rx = tmp1;
391 ec->rx_2 = tmp;
392 }
385 393
386 /* hard limit filter to prevent clipping. Note that at this stage 394 /* Block average of power in the filter states. Used for
387 rx should be limited to +/- 16383 due to right shift above */ 395 adaption power calculation. */
388 tmp1 = ec->rx_1 >> 15;
389 if (tmp1 > 16383) tmp1 = 16383;
390 if (tmp1 < -16383) tmp1 = -16383;
391 rx = tmp1;
392 ec->rx_2 = tmp;
393 }
394 396
395 /* Block average of power in the filter states. Used for 397 {
396 adaption power calculation. */ 398 int new, old;
399
400 /* efficient "out with the old and in with the new" algorithm so
401 we don't have to recalculate over the whole block of
402 samples. */
403 new = (int)tx *(int)tx;
404 old = (int)ec->fir_state.history[ec->fir_state.curr_pos] *
405 (int)ec->fir_state.history[ec->fir_state.curr_pos];
406 ec->Pstates +=
407 ((new - old) + (1 << ec->log2taps)) >> ec->log2taps;
408 if (ec->Pstates < 0)
409 ec->Pstates = 0;
410 }
397 411
398 { 412 /* Calculate short term average levels using simple single pole IIRs */
399 int new, old;
400 413
401 /* efficient "out with the old and in with the new" algorithm so 414 ec->Ltxacc += abs(tx) - ec->Ltx;
402 we don't have to recalculate over the whole block of 415 ec->Ltx = (ec->Ltxacc + (1 << 4)) >> 5;
403 samples. */ 416 ec->Lrxacc += abs(rx) - ec->Lrx;
404 new = (int)tx * (int)tx; 417 ec->Lrx = (ec->Lrxacc + (1 << 4)) >> 5;
405 old = (int)ec->fir_state.history[ec->fir_state.curr_pos] *
406 (int)ec->fir_state.history[ec->fir_state.curr_pos];
407 ec->Pstates += ((new - old) + (1<<ec->log2taps)) >> ec->log2taps;
408 if (ec->Pstates < 0) ec->Pstates = 0;
409 }
410
411 /* Calculate short term average levels using simple single pole IIRs */
412
413 ec->Ltxacc += abs(tx) - ec->Ltx;
414 ec->Ltx = (ec->Ltxacc + (1<<4)) >> 5;
415 ec->Lrxacc += abs(rx) - ec->Lrx;
416 ec->Lrx = (ec->Lrxacc + (1<<4)) >> 5;
417
418 /* Foreground filter ---------------------------------------------------*/
419
420 ec->fir_state.coeffs = ec->fir_taps16[0];
421 echo_value = fir16(&ec->fir_state, tx);
422 ec->clean = rx - echo_value;
423 ec->Lcleanacc += abs(ec->clean) - ec->Lclean;
424 ec->Lclean = (ec->Lcleanacc + (1<<4)) >> 5;
425
426 /* Background filter ---------------------------------------------------*/
427
428 echo_value = fir16(&ec->fir_state_bg, tx);
429 clean_bg = rx - echo_value;
430 ec->Lclean_bgacc += abs(clean_bg) - ec->Lclean_bg;
431 ec->Lclean_bg = (ec->Lclean_bgacc + (1<<4)) >> 5;
432
433 /* Background Filter adaption -----------------------------------------*/
434
435 /* Almost always adap bg filter, just simple DT and energy
436 detection to minimise adaption in cases of strong double talk.
437 However this is not critical for the dual path algorithm.
438 */
439 ec->factor = 0;
440 ec->shift = 0;
441 if ((ec->nonupdate_dwell == 0)) {
442 int P, logP, shift;
443
444 /* Determine:
445
446 f = Beta * clean_bg_rx/P ------ (1)
447
448 where P is the total power in the filter states.
449
450 The Boffins have shown that if we obey (1) we converge
451 quickly and avoid instability.
452
453 The correct factor f must be in Q30, as this is the fixed
454 point format required by the lms_adapt_bg() function,
455 therefore the scaled version of (1) is:
456
457 (2^30) * f = (2^30) * Beta * clean_bg_rx/P
458 factor = (2^30) * Beta * clean_bg_rx/P ----- (2)
459
460 We have chosen Beta = 0.25 by experiment, so:
461
462 factor = (2^30) * (2^-2) * clean_bg_rx/P
463
464 (30 - 2 - log2(P))
465 factor = clean_bg_rx 2 ----- (3)
466
467 To avoid a divide we approximate log2(P) as top_bit(P),
468 which returns the position of the highest non-zero bit in
469 P. This approximation introduces an error as large as a
470 factor of 2, but the algorithm seems to handle it OK.
471
472 Come to think of it a divide may not be a big deal on a
473 modern DSP, so its probably worth checking out the cycles
474 for a divide versus a top_bit() implementation.
475 */
476
477 P = MIN_TX_POWER_FOR_ADAPTION + ec->Pstates;
478 logP = top_bit(P) + ec->log2taps;
479 shift = 30 - 2 - logP;
480 ec->shift = shift;
481
482 lms_adapt_bg(ec, clean_bg, shift);
483 }
484
485 /* very simple DTD to make sure we dont try and adapt with strong
486 near end speech */
487
488 ec->adapt = 0;
489 if ((ec->Lrx > MIN_RX_POWER_FOR_ADAPTION) && (ec->Lrx > ec->Ltx))
490 ec->nonupdate_dwell = DTD_HANGOVER;
491 if (ec->nonupdate_dwell)
492 ec->nonupdate_dwell--;
493 418
494 /* Transfer logic ------------------------------------------------------*/ 419 /* Foreground filter --------------------------------------------------- */
495 420
496 /* These conditions are from the dual path paper [1], I messed with 421 ec->fir_state.coeffs = ec->fir_taps16[0];
497 them a bit to improve performance. */ 422 echo_value = fir16(&ec->fir_state, tx);
423 ec->clean = rx - echo_value;
424 ec->Lcleanacc += abs(ec->clean) - ec->Lclean;
425 ec->Lclean = (ec->Lcleanacc + (1 << 4)) >> 5;
498 426
499 if ((ec->adaption_mode & ECHO_CAN_USE_ADAPTION) && 427 /* Background filter --------------------------------------------------- */
500 (ec->nonupdate_dwell == 0) &&
501 (8*ec->Lclean_bg < 7*ec->Lclean) /* (ec->Lclean_bg < 0.875*ec->Lclean) */ &&
502 (8*ec->Lclean_bg < ec->Ltx) /* (ec->Lclean_bg < 0.125*ec->Ltx) */ )
503 {
504 if (ec->cond_met == 6) {
505 /* BG filter has had better results for 6 consecutive samples */
506 ec->adapt = 1;
507 memcpy(ec->fir_taps16[0], ec->fir_taps16[1], ec->taps*sizeof(int16_t));
508 }
509 else
510 ec->cond_met++;
511 }
512 else
513 ec->cond_met = 0;
514 428
515 /* Non-Linear Processing ---------------------------------------------------*/ 429 echo_value = fir16(&ec->fir_state_bg, tx);
430 clean_bg = rx - echo_value;
431 ec->Lclean_bgacc += abs(clean_bg) - ec->Lclean_bg;
432 ec->Lclean_bg = (ec->Lclean_bgacc + (1 << 4)) >> 5;
516 433
517 ec->clean_nlp = ec->clean; 434 /* Background Filter adaption ----------------------------------------- */
518 if (ec->adaption_mode & ECHO_CAN_USE_NLP)
519 {
520 /* Non-linear processor - a fancy way to say "zap small signals, to avoid
521 residual echo due to (uLaw/ALaw) non-linearity in the channel.". */
522 435
523 if ((16*ec->Lclean < ec->Ltx)) 436 /* Almost always adap bg filter, just simple DT and energy
524 { 437 detection to minimise adaption in cases of strong double talk.
525 /* Our e/c has improved echo by at least 24 dB (each factor of 2 is 6dB, 438 However this is not critical for the dual path algorithm.
526 so 2*2*2*2=16 is the same as 6+6+6+6=24dB) */ 439 */
527 if (ec->adaption_mode & ECHO_CAN_USE_CNG) 440 ec->factor = 0;
528 { 441 ec->shift = 0;
529 ec->cng_level = ec->Lbgn; 442 if ((ec->nonupdate_dwell == 0)) {
530 443 int P, logP, shift;
531 /* Very elementary comfort noise generation. Just random 444
532 numbers rolled off very vaguely Hoth-like. DR: This 445 /* Determine:
533 noise doesn't sound quite right to me - I suspect there 446
534 are some overlfow issues in the filtering as it's too 447 f = Beta * clean_bg_rx/P ------ (1)
535 "crackly". TODO: debug this, maybe just play noise at 448
536 high level or look at spectrum. 449 where P is the total power in the filter states.
537 */ 450
538 451 The Boffins have shown that if we obey (1) we converge
539 ec->cng_rndnum = 1664525U*ec->cng_rndnum + 1013904223U; 452 quickly and avoid instability.
540 ec->cng_filter = ((ec->cng_rndnum & 0xFFFF) - 32768 + 5*ec->cng_filter) >> 3; 453
541 ec->clean_nlp = (ec->cng_filter*ec->cng_level*8) >> 14; 454 The correct factor f must be in Q30, as this is the fixed
542 455 point format required by the lms_adapt_bg() function,
543 } 456 therefore the scaled version of (1) is:
544 else if (ec->adaption_mode & ECHO_CAN_USE_CLIP) 457
545 { 458 (2^30) * f = (2^30) * Beta * clean_bg_rx/P
546 /* This sounds much better than CNG */ 459 factor = (2^30) * Beta * clean_bg_rx/P ----- (2)
547 if (ec->clean_nlp > ec->Lbgn) 460
548 ec->clean_nlp = ec->Lbgn; 461 We have chosen Beta = 0.25 by experiment, so:
549 if (ec->clean_nlp < -ec->Lbgn) 462
550 ec->clean_nlp = -ec->Lbgn; 463 factor = (2^30) * (2^-2) * clean_bg_rx/P
464
465 (30 - 2 - log2(P))
466 factor = clean_bg_rx 2 ----- (3)
467
468 To avoid a divide we approximate log2(P) as top_bit(P),
469 which returns the position of the highest non-zero bit in
470 P. This approximation introduces an error as large as a
471 factor of 2, but the algorithm seems to handle it OK.
472
473 Come to think of it a divide may not be a big deal on a
474 modern DSP, so its probably worth checking out the cycles
475 for a divide versus a top_bit() implementation.
476 */
477
478 P = MIN_TX_POWER_FOR_ADAPTION + ec->Pstates;
479 logP = top_bit(P) + ec->log2taps;
480 shift = 30 - 2 - logP;
481 ec->shift = shift;
482
483 lms_adapt_bg(ec, clean_bg, shift);
551 } 484 }
552 else 485
553 { 486 /* very simple DTD to make sure we dont try and adapt with strong
554 /* just mute the residual, doesn't sound very good, used mainly 487 near end speech */
555 in G168 tests */ 488
556 ec->clean_nlp = 0; 489 ec->adapt = 0;
557 } 490 if ((ec->Lrx > MIN_RX_POWER_FOR_ADAPTION) && (ec->Lrx > ec->Ltx))
558 } 491 ec->nonupdate_dwell = DTD_HANGOVER;
559 else { 492 if (ec->nonupdate_dwell)
560 /* Background noise estimator. I tried a few algorithms 493 ec->nonupdate_dwell--;
561 here without much luck. This very simple one seems to 494
562 work best, we just average the level using a slow (1 sec 495 /* Transfer logic ------------------------------------------------------ */
563 time const) filter if the current level is less than a 496
564 (experimentally derived) constant. This means we dont 497 /* These conditions are from the dual path paper [1], I messed with
565 include high level signals like near end speech. When 498 them a bit to improve performance. */
566 combined with CNG or especially CLIP seems to work OK. 499
567 */ 500 if ((ec->adaption_mode & ECHO_CAN_USE_ADAPTION) &&
568 if (ec->Lclean < 40) { 501 (ec->nonupdate_dwell == 0) &&
569 ec->Lbgn_acc += abs(ec->clean) - ec->Lbgn; 502 (8 * ec->Lclean_bg <
570 ec->Lbgn = (ec->Lbgn_acc + (1<<11)) >> 12; 503 7 * ec->Lclean) /* (ec->Lclean_bg < 0.875*ec->Lclean) */ &&
571 } 504 (8 * ec->Lclean_bg <
572 } 505 ec->Ltx) /* (ec->Lclean_bg < 0.125*ec->Ltx) */ ) {
573 } 506 if (ec->cond_met == 6) {
574 507 /* BG filter has had better results for 6 consecutive samples */
575 /* Roll around the taps buffer */ 508 ec->adapt = 1;
576 if (ec->curr_pos <= 0) 509 memcpy(ec->fir_taps16[0], ec->fir_taps16[1],
577 ec->curr_pos = ec->taps; 510 ec->taps * sizeof(int16_t));
578 ec->curr_pos--; 511 } else
579 512 ec->cond_met++;
580 if (ec->adaption_mode & ECHO_CAN_DISABLE) 513 } else
581 ec->clean_nlp = rx; 514 ec->cond_met = 0;
582 515
583 /* Output scaled back up again to match input scaling */ 516 /* Non-Linear Processing --------------------------------------------------- */
584 517
585 return (int16_t) ec->clean_nlp << 1; 518 ec->clean_nlp = ec->clean;
519 if (ec->adaption_mode & ECHO_CAN_USE_NLP) {
520 /* Non-linear processor - a fancy way to say "zap small signals, to avoid
521 residual echo due to (uLaw/ALaw) non-linearity in the channel.". */
522
523 if ((16 * ec->Lclean < ec->Ltx)) {
524 /* Our e/c has improved echo by at least 24 dB (each factor of 2 is 6dB,
525 so 2*2*2*2=16 is the same as 6+6+6+6=24dB) */
526 if (ec->adaption_mode & ECHO_CAN_USE_CNG) {
527 ec->cng_level = ec->Lbgn;
528
529 /* Very elementary comfort noise generation. Just random
530 numbers rolled off very vaguely Hoth-like. DR: This
531 noise doesn't sound quite right to me - I suspect there
532 are some overlfow issues in the filtering as it's too
533 "crackly". TODO: debug this, maybe just play noise at
534 high level or look at spectrum.
535 */
536
537 ec->cng_rndnum =
538 1664525U * ec->cng_rndnum + 1013904223U;
539 ec->cng_filter =
540 ((ec->cng_rndnum & 0xFFFF) - 32768 +
541 5 * ec->cng_filter) >> 3;
542 ec->clean_nlp =
543 (ec->cng_filter * ec->cng_level * 8) >> 14;
544
545 } else if (ec->adaption_mode & ECHO_CAN_USE_CLIP) {
546 /* This sounds much better than CNG */
547 if (ec->clean_nlp > ec->Lbgn)
548 ec->clean_nlp = ec->Lbgn;
549 if (ec->clean_nlp < -ec->Lbgn)
550 ec->clean_nlp = -ec->Lbgn;
551 } else {
552 /* just mute the residual, doesn't sound very good, used mainly
553 in G168 tests */
554 ec->clean_nlp = 0;
555 }
556 } else {
557 /* Background noise estimator. I tried a few algorithms
558 here without much luck. This very simple one seems to
559 work best, we just average the level using a slow (1 sec
560 time const) filter if the current level is less than a
561 (experimentally derived) constant. This means we dont
562 include high level signals like near end speech. When
563 combined with CNG or especially CLIP seems to work OK.
564 */
565 if (ec->Lclean < 40) {
566 ec->Lbgn_acc += abs(ec->clean) - ec->Lbgn;
567 ec->Lbgn = (ec->Lbgn_acc + (1 << 11)) >> 12;
568 }
569 }
570 }
571
572 /* Roll around the taps buffer */
573 if (ec->curr_pos <= 0)
574 ec->curr_pos = ec->taps;
575 ec->curr_pos--;
576
577 if (ec->adaption_mode & ECHO_CAN_DISABLE)
578 ec->clean_nlp = rx;
579
580 /* Output scaled back up again to match input scaling */
581
582 return (int16_t) ec->clean_nlp << 1;
586} 583}
587 584
588/*- End of function --------------------------------------------------------*/ 585EXPORT_SYMBOL_GPL(oslec_update);
589 586
590/* This function is seperated from the echo canceller is it is usually called 587/* This function is seperated from the echo canceller is it is usually called
591 as part of the tx process. See rx HP (DC blocking) filter above, it's 588 as part of the tx process. See rx HP (DC blocking) filter above, it's
@@ -608,25 +605,35 @@ int16_t echo_can_update(echo_can_state_t *ec, int16_t tx, int16_t rx)
608 precision, which noise shapes things, giving very clean DC removal. 605 precision, which noise shapes things, giving very clean DC removal.
609*/ 606*/
610 607
611int16_t echo_can_hpf_tx(echo_can_state_t *ec, int16_t tx) { 608int16_t oslec_hpf_tx(struct oslec_state * ec, int16_t tx)
612 int tmp, tmp1; 609{
610 int tmp, tmp1;
613 611
614 if (ec->adaption_mode & ECHO_CAN_USE_TX_HPF) { 612 if (ec->adaption_mode & ECHO_CAN_USE_TX_HPF) {
615 tmp = tx << 15; 613 tmp = tx << 15;
616#if 1 614#if 1
617 /* Make sure the gain of the HPF is 1.0. The first can still saturate a little under 615 /* Make sure the gain of the HPF is 1.0. The first can still saturate a little under
618 impulse conditions, and it might roll to 32768 and need clipping on sustained peak 616 impulse conditions, and it might roll to 32768 and need clipping on sustained peak
619 level signals. However, the scale of such clipping is small, and the error due to 617 level signals. However, the scale of such clipping is small, and the error due to
620 any saturation should not markedly affect the downstream processing. */ 618 any saturation should not markedly affect the downstream processing. */
621 tmp -= (tmp >> 4); 619 tmp -= (tmp >> 4);
622#endif 620#endif
623 ec->tx_1 += -(ec->tx_1>>DC_LOG2BETA) + tmp - ec->tx_2; 621 ec->tx_1 += -(ec->tx_1 >> DC_LOG2BETA) + tmp - ec->tx_2;
624 tmp1 = ec->tx_1 >> 15; 622 tmp1 = ec->tx_1 >> 15;
625 if (tmp1 > 32767) tmp1 = 32767; 623 if (tmp1 > 32767)
626 if (tmp1 < -32767) tmp1 = -32767; 624 tmp1 = 32767;
627 tx = tmp1; 625 if (tmp1 < -32767)
628 ec->tx_2 = tmp; 626 tmp1 = -32767;
629 } 627 tx = tmp1;
630 628 ec->tx_2 = tmp;
631 return tx; 629 }
630
631 return tx;
632} 632}
633
634EXPORT_SYMBOL_GPL(oslec_hpf_tx);
635
636MODULE_LICENSE("GPL");
637MODULE_AUTHOR("David Rowe");
638MODULE_DESCRIPTION("Open Source Line Echo Canceller");
639MODULE_VERSION("0.3.0");
diff --git a/drivers/staging/echo/echo.h b/drivers/staging/echo/echo.h
index 7a91b4390f3b..9fb9543c4f13 100644
--- a/drivers/staging/echo/echo.h
+++ b/drivers/staging/echo/echo.h
@@ -118,23 +118,14 @@ a minor burden.
118*/ 118*/
119 119
120#include "fir.h" 120#include "fir.h"
121 121#include "oslec.h"
122/* Mask bits for the adaption mode */
123#define ECHO_CAN_USE_ADAPTION 0x01
124#define ECHO_CAN_USE_NLP 0x02
125#define ECHO_CAN_USE_CNG 0x04
126#define ECHO_CAN_USE_CLIP 0x08
127#define ECHO_CAN_USE_TX_HPF 0x10
128#define ECHO_CAN_USE_RX_HPF 0x20
129#define ECHO_CAN_DISABLE 0x40
130 122
131/*! 123/*!
132 G.168 echo canceller descriptor. This defines the working state for a line 124 G.168 echo canceller descriptor. This defines the working state for a line
133 echo canceller. 125 echo canceller.
134*/ 126*/
135typedef struct 127struct oslec_state {
136{ 128 int16_t tx, rx;
137 int16_t tx,rx;
138 int16_t clean; 129 int16_t clean;
139 int16_t clean_nlp; 130 int16_t clean_nlp;
140 131
@@ -176,45 +167,6 @@ typedef struct
176 167
177 /* snapshot sample of coeffs used for development */ 168 /* snapshot sample of coeffs used for development */
178 int16_t *snapshot; 169 int16_t *snapshot;
179} echo_can_state_t; 170};
180
181/*! Create a voice echo canceller context.
182 \param len The length of the canceller, in samples.
183 \return The new canceller context, or NULL if the canceller could not be created.
184*/
185echo_can_state_t *echo_can_create(int len, int adaption_mode);
186
187/*! Free a voice echo canceller context.
188 \param ec The echo canceller context.
189*/
190void echo_can_free(echo_can_state_t *ec);
191
192/*! Flush (reinitialise) a voice echo canceller context.
193 \param ec The echo canceller context.
194*/
195void echo_can_flush(echo_can_state_t *ec);
196
197/*! Set the adaption mode of a voice echo canceller context.
198 \param ec The echo canceller context.
199 \param adapt The mode.
200*/
201void echo_can_adaption_mode(echo_can_state_t *ec, int adaption_mode);
202
203void echo_can_snapshot(echo_can_state_t *ec);
204
205/*! Process a sample through a voice echo canceller.
206 \param ec The echo canceller context.
207 \param tx The transmitted audio sample.
208 \param rx The received audio sample.
209 \return The clean (echo cancelled) received sample.
210*/
211int16_t echo_can_update(echo_can_state_t *ec, int16_t tx, int16_t rx);
212
213/*! Process to high pass filter the tx signal.
214 \param ec The echo canceller context.
215 \param tx The transmitted auio sample.
216 \return The HP filtered transmit sample, send this to your D/A.
217*/
218int16_t echo_can_hpf_tx(echo_can_state_t *ec, int16_t tx);
219 171
220#endif /* __ECHO_H */ 172#endif /* __ECHO_H */
diff --git a/drivers/staging/echo/fir.h b/drivers/staging/echo/fir.h
index e1bfc4994886..5645cb1b2f90 100644
--- a/drivers/staging/echo/fir.h
+++ b/drivers/staging/echo/fir.h
@@ -72,8 +72,7 @@
72 16 bit integer FIR descriptor. This defines the working state for a single 72 16 bit integer FIR descriptor. This defines the working state for a single
73 instance of an FIR filter using 16 bit integer coefficients. 73 instance of an FIR filter using 16 bit integer coefficients.
74*/ 74*/
75typedef struct 75typedef struct {
76{
77 int taps; 76 int taps;
78 int curr_pos; 77 int curr_pos;
79 const int16_t *coeffs; 78 const int16_t *coeffs;
@@ -85,8 +84,7 @@ typedef struct
85 instance of an FIR filter using 32 bit integer coefficients, and filtering 84 instance of an FIR filter using 32 bit integer coefficients, and filtering
86 16 bit integer data. 85 16 bit integer data.
87*/ 86*/
88typedef struct 87typedef struct {
89{
90 int taps; 88 int taps;
91 int curr_pos; 89 int curr_pos;
92 const int32_t *coeffs; 90 const int32_t *coeffs;
@@ -97,273 +95,201 @@ typedef struct
97 Floating point FIR descriptor. This defines the working state for a single 95 Floating point FIR descriptor. This defines the working state for a single
98 instance of an FIR filter using floating point coefficients and data. 96 instance of an FIR filter using floating point coefficients and data.
99*/ 97*/
100typedef struct 98typedef struct {
101{
102 int taps; 99 int taps;
103 int curr_pos; 100 int curr_pos;
104 const float *coeffs; 101 const float *coeffs;
105 float *history; 102 float *history;
106} fir_float_state_t; 103} fir_float_state_t;
107 104
108#ifdef __cplusplus 105static __inline__ const int16_t *fir16_create(fir16_state_t * fir,
109extern "C" { 106 const int16_t * coeffs, int taps)
110#endif
111
112static __inline__ const int16_t *fir16_create(fir16_state_t *fir,
113 const int16_t *coeffs,
114 int taps)
115{ 107{
116 fir->taps = taps; 108 fir->taps = taps;
117 fir->curr_pos = taps - 1; 109 fir->curr_pos = taps - 1;
118 fir->coeffs = coeffs; 110 fir->coeffs = coeffs;
119#if defined(USE_MMX) || defined(USE_SSE2) || defined(__BLACKFIN_ASM__) 111#if defined(USE_MMX) || defined(USE_SSE2) || defined(__bfin__)
120 if ((fir->history = malloc(2*taps*sizeof(int16_t)))) 112 fir->history = kcalloc(2 * taps, sizeof(int16_t), GFP_KERNEL);
121 memset(fir->history, 0, 2*taps*sizeof(int16_t));
122#else 113#else
123 if ((fir->history = (int16_t *) malloc(taps*sizeof(int16_t)))) 114 fir->history = kcalloc(taps, sizeof(int16_t), GFP_KERNEL);
124 memset(fir->history, 0, taps*sizeof(int16_t));
125#endif 115#endif
126 return fir->history; 116 return fir->history;
127} 117}
128/*- End of function --------------------------------------------------------*/
129 118
130static __inline__ void fir16_flush(fir16_state_t *fir) 119static __inline__ void fir16_flush(fir16_state_t * fir)
131{ 120{
132#if defined(USE_MMX) || defined(USE_SSE2) || defined(__BLACKFIN_ASM__) 121#if defined(USE_MMX) || defined(USE_SSE2) || defined(__bfin__)
133 memset(fir->history, 0, 2*fir->taps*sizeof(int16_t)); 122 memset(fir->history, 0, 2 * fir->taps * sizeof(int16_t));
134#else 123#else
135 memset(fir->history, 0, fir->taps*sizeof(int16_t)); 124 memset(fir->history, 0, fir->taps * sizeof(int16_t));
136#endif 125#endif
137} 126}
138/*- End of function --------------------------------------------------------*/
139 127
140static __inline__ void fir16_free(fir16_state_t *fir) 128static __inline__ void fir16_free(fir16_state_t * fir)
141{ 129{
142 free(fir->history); 130 kfree(fir->history);
143} 131}
144/*- End of function --------------------------------------------------------*/
145 132
146#ifdef __BLACKFIN_ASM__ 133#ifdef __bfin__
147static inline int32_t dot_asm(short *x, short *y, int len) 134static inline int32_t dot_asm(short *x, short *y, int len)
148{ 135{
149 int dot; 136 int dot;
150 137
151 len--; 138 len--;
152 139
153 __asm__ 140 __asm__("I0 = %1;\n\t"
154 ( 141 "I1 = %2;\n\t"
155 "I0 = %1;\n\t" 142 "A0 = 0;\n\t"
156 "I1 = %2;\n\t" 143 "R0.L = W[I0++] || R1.L = W[I1++];\n\t"
157 "A0 = 0;\n\t" 144 "LOOP dot%= LC0 = %3;\n\t"
158 "R0.L = W[I0++] || R1.L = W[I1++];\n\t" 145 "LOOP_BEGIN dot%=;\n\t"
159 "LOOP dot%= LC0 = %3;\n\t" 146 "A0 += R0.L * R1.L (IS) || R0.L = W[I0++] || R1.L = W[I1++];\n\t"
160 "LOOP_BEGIN dot%=;\n\t" 147 "LOOP_END dot%=;\n\t"
161 "A0 += R0.L * R1.L (IS) || R0.L = W[I0++] || R1.L = W[I1++];\n\t" 148 "A0 += R0.L*R1.L (IS);\n\t"
162 "LOOP_END dot%=;\n\t" 149 "R0 = A0;\n\t"
163 "A0 += R0.L*R1.L (IS);\n\t" 150 "%0 = R0;\n\t"
164 "R0 = A0;\n\t" 151 :"=&d"(dot)
165 "%0 = R0;\n\t" 152 :"a"(x), "a"(y), "a"(len)
166 : "=&d" (dot) 153 :"I0", "I1", "A1", "A0", "R0", "R1"
167 : "a" (x), "a" (y), "a" (len) 154 );
168 : "I0", "I1", "A1", "A0", "R0", "R1" 155
169 ); 156 return dot;
170
171 return dot;
172} 157}
173#endif 158#endif
174/*- End of function --------------------------------------------------------*/
175 159
176static __inline__ int16_t fir16(fir16_state_t *fir, int16_t sample) 160static __inline__ int16_t fir16(fir16_state_t * fir, int16_t sample)
177{ 161{
178 int32_t y; 162 int32_t y;
179#if defined(USE_MMX) 163#if defined(USE_MMX)
180 int i; 164 int i;
181 mmx_t *mmx_coeffs; 165 mmx_t *mmx_coeffs;
182 mmx_t *mmx_hist; 166 mmx_t *mmx_hist;
183 167
184 fir->history[fir->curr_pos] = sample; 168 fir->history[fir->curr_pos] = sample;
185 fir->history[fir->curr_pos + fir->taps] = sample; 169 fir->history[fir->curr_pos + fir->taps] = sample;
186 170
187 mmx_coeffs = (mmx_t *) fir->coeffs; 171 mmx_coeffs = (mmx_t *) fir->coeffs;
188 mmx_hist = (mmx_t *) &fir->history[fir->curr_pos]; 172 mmx_hist = (mmx_t *) & fir->history[fir->curr_pos];
189 i = fir->taps; 173 i = fir->taps;
190 pxor_r2r(mm4, mm4); 174 pxor_r2r(mm4, mm4);
191 /* 8 samples per iteration, so the filter must be a multiple of 8 long. */ 175 /* 8 samples per iteration, so the filter must be a multiple of 8 long. */
192 while (i > 0) 176 while (i > 0) {
193 { 177 movq_m2r(mmx_coeffs[0], mm0);
194 movq_m2r(mmx_coeffs[0], mm0); 178 movq_m2r(mmx_coeffs[1], mm2);
195 movq_m2r(mmx_coeffs[1], mm2); 179 movq_m2r(mmx_hist[0], mm1);
196 movq_m2r(mmx_hist[0], mm1); 180 movq_m2r(mmx_hist[1], mm3);
197 movq_m2r(mmx_hist[1], mm3); 181 mmx_coeffs += 2;
198 mmx_coeffs += 2; 182 mmx_hist += 2;
199 mmx_hist += 2; 183 pmaddwd_r2r(mm1, mm0);
200 pmaddwd_r2r(mm1, mm0); 184 pmaddwd_r2r(mm3, mm2);
201 pmaddwd_r2r(mm3, mm2); 185 paddd_r2r(mm0, mm4);
202 paddd_r2r(mm0, mm4); 186 paddd_r2r(mm2, mm4);
203 paddd_r2r(mm2, mm4); 187 i -= 8;
204 i -= 8; 188 }
205 } 189 movq_r2r(mm4, mm0);
206 movq_r2r(mm4, mm0); 190 psrlq_i2r(32, mm0);
207 psrlq_i2r(32, mm0); 191 paddd_r2r(mm0, mm4);
208 paddd_r2r(mm0, mm4); 192 movd_r2m(mm4, y);
209 movd_r2m(mm4, y); 193 emms();
210 emms();
211#elif defined(USE_SSE2) 194#elif defined(USE_SSE2)
212 int i; 195 int i;
213 xmm_t *xmm_coeffs; 196 xmm_t *xmm_coeffs;
214 xmm_t *xmm_hist; 197 xmm_t *xmm_hist;
215 198
216 fir->history[fir->curr_pos] = sample; 199 fir->history[fir->curr_pos] = sample;
217 fir->history[fir->curr_pos + fir->taps] = sample; 200 fir->history[fir->curr_pos + fir->taps] = sample;
218 201
219 xmm_coeffs = (xmm_t *) fir->coeffs; 202 xmm_coeffs = (xmm_t *) fir->coeffs;
220 xmm_hist = (xmm_t *) &fir->history[fir->curr_pos]; 203 xmm_hist = (xmm_t *) & fir->history[fir->curr_pos];
221 i = fir->taps; 204 i = fir->taps;
222 pxor_r2r(xmm4, xmm4); 205 pxor_r2r(xmm4, xmm4);
223 /* 16 samples per iteration, so the filter must be a multiple of 16 long. */ 206 /* 16 samples per iteration, so the filter must be a multiple of 16 long. */
224 while (i > 0) 207 while (i > 0) {
225 { 208 movdqu_m2r(xmm_coeffs[0], xmm0);
226 movdqu_m2r(xmm_coeffs[0], xmm0); 209 movdqu_m2r(xmm_coeffs[1], xmm2);
227 movdqu_m2r(xmm_coeffs[1], xmm2); 210 movdqu_m2r(xmm_hist[0], xmm1);
228 movdqu_m2r(xmm_hist[0], xmm1); 211 movdqu_m2r(xmm_hist[1], xmm3);
229 movdqu_m2r(xmm_hist[1], xmm3); 212 xmm_coeffs += 2;
230 xmm_coeffs += 2; 213 xmm_hist += 2;
231 xmm_hist += 2; 214 pmaddwd_r2r(xmm1, xmm0);
232 pmaddwd_r2r(xmm1, xmm0); 215 pmaddwd_r2r(xmm3, xmm2);
233 pmaddwd_r2r(xmm3, xmm2); 216 paddd_r2r(xmm0, xmm4);
234 paddd_r2r(xmm0, xmm4); 217 paddd_r2r(xmm2, xmm4);
235 paddd_r2r(xmm2, xmm4); 218 i -= 16;
236 i -= 16; 219 }
237 } 220 movdqa_r2r(xmm4, xmm0);
238 movdqa_r2r(xmm4, xmm0); 221 psrldq_i2r(8, xmm0);
239 psrldq_i2r(8, xmm0); 222 paddd_r2r(xmm0, xmm4);
240 paddd_r2r(xmm0, xmm4); 223 movdqa_r2r(xmm4, xmm0);
241 movdqa_r2r(xmm4, xmm0); 224 psrldq_i2r(4, xmm0);
242 psrldq_i2r(4, xmm0); 225 paddd_r2r(xmm0, xmm4);
243 paddd_r2r(xmm0, xmm4); 226 movd_r2m(xmm4, y);
244 movd_r2m(xmm4, y); 227#elif defined(__bfin__)
245#elif defined(__BLACKFIN_ASM__) 228 fir->history[fir->curr_pos] = sample;
246 fir->history[fir->curr_pos] = sample; 229 fir->history[fir->curr_pos + fir->taps] = sample;
247 fir->history[fir->curr_pos + fir->taps] = sample; 230 y = dot_asm((int16_t *) fir->coeffs, &fir->history[fir->curr_pos],
248 y = dot_asm((int16_t*)fir->coeffs, &fir->history[fir->curr_pos], fir->taps); 231 fir->taps);
249#else 232#else
250 int i; 233 int i;
251 int offset1; 234 int offset1;
252 int offset2; 235 int offset2;
253 236
254 fir->history[fir->curr_pos] = sample; 237 fir->history[fir->curr_pos] = sample;
255 238
256 offset2 = fir->curr_pos; 239 offset2 = fir->curr_pos;
257 offset1 = fir->taps - offset2; 240 offset1 = fir->taps - offset2;
258 y = 0; 241 y = 0;
259 for (i = fir->taps - 1; i >= offset1; i--) 242 for (i = fir->taps - 1; i >= offset1; i--)
260 y += fir->coeffs[i]*fir->history[i - offset1]; 243 y += fir->coeffs[i] * fir->history[i - offset1];
261 for ( ; i >= 0; i--) 244 for (; i >= 0; i--)
262 y += fir->coeffs[i]*fir->history[i + offset2]; 245 y += fir->coeffs[i] * fir->history[i + offset2];
263#endif 246#endif
264 if (fir->curr_pos <= 0) 247 if (fir->curr_pos <= 0)
265 fir->curr_pos = fir->taps; 248 fir->curr_pos = fir->taps;
266 fir->curr_pos--; 249 fir->curr_pos--;
267 return (int16_t) (y >> 15); 250 return (int16_t) (y >> 15);
268}
269/*- End of function --------------------------------------------------------*/
270
271static __inline__ const int16_t *fir32_create(fir32_state_t *fir,
272 const int32_t *coeffs,
273 int taps)
274{
275 fir->taps = taps;
276 fir->curr_pos = taps - 1;
277 fir->coeffs = coeffs;
278 fir->history = (int16_t *) malloc(taps*sizeof(int16_t));
279 if (fir->history)
280 memset(fir->history, '\0', taps*sizeof(int16_t));
281 return fir->history;
282}
283/*- End of function --------------------------------------------------------*/
284
285static __inline__ void fir32_flush(fir32_state_t *fir)
286{
287 memset(fir->history, 0, fir->taps*sizeof(int16_t));
288} 251}
289/*- End of function --------------------------------------------------------*/
290 252
291static __inline__ void fir32_free(fir32_state_t *fir) 253static __inline__ const int16_t *fir32_create(fir32_state_t * fir,
254 const int32_t * coeffs, int taps)
292{ 255{
293 free(fir->history); 256 fir->taps = taps;
294} 257 fir->curr_pos = taps - 1;
295/*- End of function --------------------------------------------------------*/ 258 fir->coeffs = coeffs;
296 259 fir->history = kcalloc(taps, sizeof(int16_t), GFP_KERNEL);
297static __inline__ int16_t fir32(fir32_state_t *fir, int16_t sample) 260 return fir->history;
298{
299 int i;
300 int32_t y;
301 int offset1;
302 int offset2;
303
304 fir->history[fir->curr_pos] = sample;
305 offset2 = fir->curr_pos;
306 offset1 = fir->taps - offset2;
307 y = 0;
308 for (i = fir->taps - 1; i >= offset1; i--)
309 y += fir->coeffs[i]*fir->history[i - offset1];
310 for ( ; i >= 0; i--)
311 y += fir->coeffs[i]*fir->history[i + offset2];
312 if (fir->curr_pos <= 0)
313 fir->curr_pos = fir->taps;
314 fir->curr_pos--;
315 return (int16_t) (y >> 15);
316} 261}
317/*- End of function --------------------------------------------------------*/
318 262
319#ifndef __KERNEL__ 263static __inline__ void fir32_flush(fir32_state_t * fir)
320static __inline__ const float *fir_float_create(fir_float_state_t *fir,
321 const float *coeffs,
322 int taps)
323{ 264{
324 fir->taps = taps; 265 memset(fir->history, 0, fir->taps * sizeof(int16_t));
325 fir->curr_pos = taps - 1;
326 fir->coeffs = coeffs;
327 fir->history = (float *) malloc(taps*sizeof(float));
328 if (fir->history)
329 memset(fir->history, '\0', taps*sizeof(float));
330 return fir->history;
331} 266}
332/*- End of function --------------------------------------------------------*/
333 267
334static __inline__ void fir_float_free(fir_float_state_t *fir) 268static __inline__ void fir32_free(fir32_state_t * fir)
335{ 269{
336 free(fir->history); 270 kfree(fir->history);
337} 271}
338/*- End of function --------------------------------------------------------*/
339 272
340static __inline__ int16_t fir_float(fir_float_state_t *fir, int16_t sample) 273static __inline__ int16_t fir32(fir32_state_t * fir, int16_t sample)
341{ 274{
342 int i; 275 int i;
343 float y; 276 int32_t y;
344 int offset1; 277 int offset1;
345 int offset2; 278 int offset2;
346 279
347 fir->history[fir->curr_pos] = sample; 280 fir->history[fir->curr_pos] = sample;
348 281 offset2 = fir->curr_pos;
349 offset2 = fir->curr_pos; 282 offset1 = fir->taps - offset2;
350 offset1 = fir->taps - offset2; 283 y = 0;
351 y = 0; 284 for (i = fir->taps - 1; i >= offset1; i--)
352 for (i = fir->taps - 1; i >= offset1; i--) 285 y += fir->coeffs[i] * fir->history[i - offset1];
353 y += fir->coeffs[i]*fir->history[i - offset1]; 286 for (; i >= 0; i--)
354 for ( ; i >= 0; i--) 287 y += fir->coeffs[i] * fir->history[i + offset2];
355 y += fir->coeffs[i]*fir->history[i + offset2]; 288 if (fir->curr_pos <= 0)
356 if (fir->curr_pos <= 0) 289 fir->curr_pos = fir->taps;
357 fir->curr_pos = fir->taps; 290 fir->curr_pos--;
358 fir->curr_pos--; 291 return (int16_t) (y >> 15);
359 return (int16_t) y;
360} 292}
361/*- End of function --------------------------------------------------------*/
362#endif
363
364#ifdef __cplusplus
365}
366#endif
367 293
368#endif 294#endif
369/*- End of file ------------------------------------------------------------*/ 295/*- End of file ------------------------------------------------------------*/
diff --git a/drivers/staging/echo/mmx.h b/drivers/staging/echo/mmx.h
index b5a3964865b6..35412efe61ce 100644
--- a/drivers/staging/echo/mmx.h
+++ b/drivers/staging/echo/mmx.h
@@ -27,24 +27,23 @@
27 * values by ULL, lest they be truncated by the compiler) 27 * values by ULL, lest they be truncated by the compiler)
28 */ 28 */
29 29
30typedef union { 30typedef union {
31 long long q; /* Quadword (64-bit) value */ 31 long long q; /* Quadword (64-bit) value */
32 unsigned long long uq; /* Unsigned Quadword */ 32 unsigned long long uq; /* Unsigned Quadword */
33 int d[2]; /* 2 Doubleword (32-bit) values */ 33 int d[2]; /* 2 Doubleword (32-bit) values */
34 unsigned int ud[2]; /* 2 Unsigned Doubleword */ 34 unsigned int ud[2]; /* 2 Unsigned Doubleword */
35 short w[4]; /* 4 Word (16-bit) values */ 35 short w[4]; /* 4 Word (16-bit) values */
36 unsigned short uw[4]; /* 4 Unsigned Word */ 36 unsigned short uw[4]; /* 4 Unsigned Word */
37 char b[8]; /* 8 Byte (8-bit) values */ 37 char b[8]; /* 8 Byte (8-bit) values */
38 unsigned char ub[8]; /* 8 Unsigned Byte */ 38 unsigned char ub[8]; /* 8 Unsigned Byte */
39 float s[2]; /* Single-precision (32-bit) value */ 39 float s[2]; /* Single-precision (32-bit) value */
40} mmx_t; /* On an 8-byte (64-bit) boundary */ 40} mmx_t; /* On an 8-byte (64-bit) boundary */
41 41
42/* SSE registers */ 42/* SSE registers */
43typedef union { 43typedef union {
44 char b[16]; 44 char b[16];
45} xmm_t; 45} xmm_t;
46 46
47
48#define mmx_i2r(op,imm,reg) \ 47#define mmx_i2r(op,imm,reg) \
49 __asm__ __volatile__ (#op " %0, %%" #reg \ 48 __asm__ __volatile__ (#op " %0, %%" #reg \
50 : /* nothing */ \ 49 : /* nothing */ \
@@ -63,7 +62,6 @@ typedef union {
63#define mmx_r2r(op,regs,regd) \ 62#define mmx_r2r(op,regs,regd) \
64 __asm__ __volatile__ (#op " %" #regs ", %" #regd) 63 __asm__ __volatile__ (#op " %" #regs ", %" #regd)
65 64
66
67#define emms() __asm__ __volatile__ ("emms") 65#define emms() __asm__ __volatile__ ("emms")
68 66
69#define movd_m2r(var,reg) mmx_m2r (movd, var, reg) 67#define movd_m2r(var,reg) mmx_m2r (movd, var, reg)
@@ -192,16 +190,13 @@ typedef union {
192#define pxor_m2r(var,reg) mmx_m2r (pxor, var, reg) 190#define pxor_m2r(var,reg) mmx_m2r (pxor, var, reg)
193#define pxor_r2r(regs,regd) mmx_r2r (pxor, regs, regd) 191#define pxor_r2r(regs,regd) mmx_r2r (pxor, regs, regd)
194 192
195
196/* 3DNOW extensions */ 193/* 3DNOW extensions */
197 194
198#define pavgusb_m2r(var,reg) mmx_m2r (pavgusb, var, reg) 195#define pavgusb_m2r(var,reg) mmx_m2r (pavgusb, var, reg)
199#define pavgusb_r2r(regs,regd) mmx_r2r (pavgusb, regs, regd) 196#define pavgusb_r2r(regs,regd) mmx_r2r (pavgusb, regs, regd)
200 197
201
202/* AMD MMX extensions - also available in intel SSE */ 198/* AMD MMX extensions - also available in intel SSE */
203 199
204
205#define mmx_m2ri(op,mem,reg,imm) \ 200#define mmx_m2ri(op,mem,reg,imm) \
206 __asm__ __volatile__ (#op " %1, %0, %%" #reg \ 201 __asm__ __volatile__ (#op " %1, %0, %%" #reg \
207 : /* nothing */ \ 202 : /* nothing */ \
@@ -216,7 +211,6 @@ typedef union {
216 : /* nothing */ \ 211 : /* nothing */ \
217 : "m" (mem)) 212 : "m" (mem))
218 213
219
220#define maskmovq(regs,maskreg) mmx_r2ri (maskmovq, regs, maskreg) 214#define maskmovq(regs,maskreg) mmx_r2ri (maskmovq, regs, maskreg)
221 215
222#define movntq_r2m(mmreg,var) mmx_r2m (movntq, mmreg, var) 216#define movntq_r2m(mmreg,var) mmx_r2m (movntq, mmreg, var)
@@ -284,5 +278,4 @@ typedef union {
284#define punpcklqdq_r2r(regs,regd) mmx_r2r (punpcklqdq, regs, regd) 278#define punpcklqdq_r2r(regs,regd) mmx_r2r (punpcklqdq, regs, regd)
285#define punpckhqdq_r2r(regs,regd) mmx_r2r (punpckhqdq, regs, regd) 279#define punpckhqdq_r2r(regs,regd) mmx_r2r (punpckhqdq, regs, regd)
286 280
287
288#endif /* AVCODEC_I386MMX_H */ 281#endif /* AVCODEC_I386MMX_H */
diff --git a/drivers/staging/echo/oslec.h b/drivers/staging/echo/oslec.h
new file mode 100644
index 000000000000..bad852328a2f
--- /dev/null
+++ b/drivers/staging/echo/oslec.h
@@ -0,0 +1,86 @@
1/*
2 * OSLEC - A line echo canceller. This code is being developed
3 * against and partially complies with G168. Using code from SpanDSP
4 *
5 * Written by Steve Underwood <steveu@coppice.org>
6 * and David Rowe <david_at_rowetel_dot_com>
7 *
8 * Copyright (C) 2001 Steve Underwood and 2007-2008 David Rowe
9 *
10 * All rights reserved.
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2, as
14 * published by the Free Software Foundation.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 *
25 */
26
27#ifndef __OSLEC_H
28#define __OSLEC_H
29
30/* TODO: document interface */
31
32/* Mask bits for the adaption mode */
33#define ECHO_CAN_USE_ADAPTION 0x01
34#define ECHO_CAN_USE_NLP 0x02
35#define ECHO_CAN_USE_CNG 0x04
36#define ECHO_CAN_USE_CLIP 0x08
37#define ECHO_CAN_USE_TX_HPF 0x10
38#define ECHO_CAN_USE_RX_HPF 0x20
39#define ECHO_CAN_DISABLE 0x40
40
41/*!
42 G.168 echo canceller descriptor. This defines the working state for a line
43 echo canceller.
44*/
45struct oslec_state;
46
47/*! Create a voice echo canceller context.
48 \param len The length of the canceller, in samples.
49 \return The new canceller context, or NULL if the canceller could not be created.
50*/
51struct oslec_state *oslec_create(int len, int adaption_mode);
52
53/*! Free a voice echo canceller context.
54 \param ec The echo canceller context.
55*/
56void oslec_free(struct oslec_state *ec);
57
58/*! Flush (reinitialise) a voice echo canceller context.
59 \param ec The echo canceller context.
60*/
61void oslec_flush(struct oslec_state *ec);
62
63/*! Set the adaption mode of a voice echo canceller context.
64 \param ec The echo canceller context.
65 \param adapt The mode.
66*/
67void oslec_adaption_mode(struct oslec_state *ec, int adaption_mode);
68
69void oslec_snapshot(struct oslec_state *ec);
70
71/*! Process a sample through a voice echo canceller.
72 \param ec The echo canceller context.
73 \param tx The transmitted audio sample.
74 \param rx The received audio sample.
75 \return The clean (echo cancelled) received sample.
76*/
77int16_t oslec_update(struct oslec_state *ec, int16_t tx, int16_t rx);
78
79/*! Process to high pass filter the tx signal.
80 \param ec The echo canceller context.
81 \param tx The transmitted auio sample.
82 \return The HP filtered transmit sample, send this to your D/A.
83*/
84int16_t oslec_hpf_tx(struct oslec_state *ec, int16_t tx);
85
86#endif /* __OSLEC_H */
diff --git a/drivers/staging/et131x/et1310_phy.c b/drivers/staging/et131x/et1310_phy.c
index 6c4fa54419ea..9dd6dfd9a033 100644
--- a/drivers/staging/et131x/et1310_phy.c
+++ b/drivers/staging/et131x/et1310_phy.c
@@ -84,7 +84,6 @@
84#include <linux/if_arp.h> 84#include <linux/if_arp.h>
85#include <linux/ioport.h> 85#include <linux/ioport.h>
86#include <linux/random.h> 86#include <linux/random.h>
87#include <linux/delay.h>
88 87
89#include "et1310_phy.h" 88#include "et1310_phy.h"
90#include "et1310_pm.h" 89#include "et1310_pm.h"
@@ -95,7 +94,6 @@
95#include "et131x_initpci.h" 94#include "et131x_initpci.h"
96 95
97#include "et1310_address_map.h" 96#include "et1310_address_map.h"
98#include "et1310_jagcore.h"
99#include "et1310_tx.h" 97#include "et1310_tx.h"
100#include "et1310_rx.h" 98#include "et1310_rx.h"
101#include "et1310_mac.h" 99#include "et1310_mac.h"
diff --git a/drivers/staging/et131x/et131x_debug.c b/drivers/staging/et131x/et131x_debug.c
index 9ee5bce92c27..d1dd46e0a9c8 100644
--- a/drivers/staging/et131x/et131x_debug.c
+++ b/drivers/staging/et131x/et131x_debug.c
@@ -97,7 +97,6 @@
97#include "et131x_isr.h" 97#include "et131x_isr.h"
98 98
99#include "et1310_address_map.h" 99#include "et1310_address_map.h"
100#include "et1310_jagcore.h"
101#include "et1310_tx.h" 100#include "et1310_tx.h"
102#include "et1310_rx.h" 101#include "et1310_rx.h"
103#include "et1310_mac.h" 102#include "et1310_mac.h"
diff --git a/drivers/staging/et131x/et131x_initpci.c b/drivers/staging/et131x/et131x_initpci.c
index 4c6f171f5b7c..a18c499d0ae0 100644
--- a/drivers/staging/et131x/et131x_initpci.c
+++ b/drivers/staging/et131x/et131x_initpci.c
@@ -97,7 +97,6 @@
97#include "et131x_isr.h" 97#include "et131x_isr.h"
98 98
99#include "et1310_address_map.h" 99#include "et1310_address_map.h"
100#include "et1310_jagcore.h"
101#include "et1310_tx.h" 100#include "et1310_tx.h"
102#include "et1310_rx.h" 101#include "et1310_rx.h"
103#include "et1310_mac.h" 102#include "et1310_mac.h"
diff --git a/drivers/staging/go7007/go7007-driver.c b/drivers/staging/go7007/go7007-driver.c
index 81ae4b0fa890..e4ead96679c8 100644
--- a/drivers/staging/go7007/go7007-driver.c
+++ b/drivers/staging/go7007/go7007-driver.c
@@ -16,7 +16,6 @@
16 */ 16 */
17 17
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/version.h>
20#include <linux/init.h> 19#include <linux/init.h>
21#include <linux/delay.h> 20#include <linux/delay.h>
22#include <linux/sched.h> 21#include <linux/sched.h>
diff --git a/drivers/staging/go7007/go7007-fw.c b/drivers/staging/go7007/go7007-fw.c
index c2aea1020b0d..a0e17b0e0ce3 100644
--- a/drivers/staging/go7007/go7007-fw.c
+++ b/drivers/staging/go7007/go7007-fw.c
@@ -26,7 +26,6 @@
26 26
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/init.h> 28#include <linux/init.h>
29#include <linux/version.h>
30#include <linux/time.h> 29#include <linux/time.h>
31#include <linux/mm.h> 30#include <linux/mm.h>
32#include <linux/device.h> 31#include <linux/device.h>
diff --git a/drivers/staging/go7007/go7007-i2c.c b/drivers/staging/go7007/go7007-i2c.c
index 10baae3dade6..cd55b76eabc7 100644
--- a/drivers/staging/go7007/go7007-i2c.c
+++ b/drivers/staging/go7007/go7007-i2c.c
@@ -15,7 +15,6 @@
15 * Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 15 * Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
16 */ 16 */
17 17
18#include <linux/version.h>
19#include <linux/module.h> 18#include <linux/module.h>
20#include <linux/init.h> 19#include <linux/init.h>
21#include <linux/delay.h> 20#include <linux/delay.h>
diff --git a/drivers/staging/go7007/go7007-usb.c b/drivers/staging/go7007/go7007-usb.c
index d4ed6d2b715f..3f5ee3424e72 100644
--- a/drivers/staging/go7007/go7007-usb.c
+++ b/drivers/staging/go7007/go7007-usb.c
@@ -16,7 +16,6 @@
16 */ 16 */
17 17
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/version.h>
20#include <linux/kernel.h> 19#include <linux/kernel.h>
21#include <linux/init.h> 20#include <linux/init.h>
22#include <linux/wait.h> 21#include <linux/wait.h>
diff --git a/drivers/staging/go7007/snd-go7007.c b/drivers/staging/go7007/snd-go7007.c
index 382740c405ff..a7de401f61ab 100644
--- a/drivers/staging/go7007/snd-go7007.c
+++ b/drivers/staging/go7007/snd-go7007.c
@@ -17,7 +17,6 @@
17 17
18#include <linux/kernel.h> 18#include <linux/kernel.h>
19#include <linux/module.h> 19#include <linux/module.h>
20#include <linux/version.h>
21#include <linux/moduleparam.h> 20#include <linux/moduleparam.h>
22#include <linux/init.h> 21#include <linux/init.h>
23#include <linux/spinlock.h> 22#include <linux/spinlock.h>
diff --git a/drivers/staging/go7007/wis-ov7640.c b/drivers/staging/go7007/wis-ov7640.c
index f5f11e927af3..2f9efca04606 100644
--- a/drivers/staging/go7007/wis-ov7640.c
+++ b/drivers/staging/go7007/wis-ov7640.c
@@ -17,7 +17,6 @@
17 17
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/version.h>
21#include <linux/i2c.h> 20#include <linux/i2c.h>
22#include <linux/videodev2.h> 21#include <linux/videodev2.h>
23 22
diff --git a/drivers/staging/go7007/wis-saa7113.c b/drivers/staging/go7007/wis-saa7113.c
index c1aff1b923a0..11689723945e 100644
--- a/drivers/staging/go7007/wis-saa7113.c
+++ b/drivers/staging/go7007/wis-saa7113.c
@@ -17,7 +17,6 @@
17 17
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/version.h>
21#include <linux/i2c.h> 20#include <linux/i2c.h>
22#include <linux/videodev2.h> 21#include <linux/videodev2.h>
23#include <linux/ioctl.h> 22#include <linux/ioctl.h>
diff --git a/drivers/staging/go7007/wis-saa7115.c b/drivers/staging/go7007/wis-saa7115.c
index 5c94c883b312..59417a7174d7 100644
--- a/drivers/staging/go7007/wis-saa7115.c
+++ b/drivers/staging/go7007/wis-saa7115.c
@@ -17,7 +17,6 @@
17 17
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/version.h>
21#include <linux/i2c.h> 20#include <linux/i2c.h>
22#include <linux/videodev2.h> 21#include <linux/videodev2.h>
23#include <linux/ioctl.h> 22#include <linux/ioctl.h>
diff --git a/drivers/staging/go7007/wis-sony-tuner.c b/drivers/staging/go7007/wis-sony-tuner.c
index 5997fb479459..5a91ee409a7c 100644
--- a/drivers/staging/go7007/wis-sony-tuner.c
+++ b/drivers/staging/go7007/wis-sony-tuner.c
@@ -17,7 +17,6 @@
17 17
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/version.h>
21#include <linux/i2c.h> 20#include <linux/i2c.h>
22#include <linux/videodev2.h> 21#include <linux/videodev2.h>
23#include <media/tuner.h> 22#include <media/tuner.h>
diff --git a/drivers/staging/go7007/wis-tw2804.c b/drivers/staging/go7007/wis-tw2804.c
index 27fe4d0d4ed6..57b8f2b1caa3 100644
--- a/drivers/staging/go7007/wis-tw2804.c
+++ b/drivers/staging/go7007/wis-tw2804.c
@@ -17,7 +17,6 @@
17 17
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/version.h>
21#include <linux/i2c.h> 20#include <linux/i2c.h>
22#include <linux/videodev2.h> 21#include <linux/videodev2.h>
23#include <linux/ioctl.h> 22#include <linux/ioctl.h>
diff --git a/drivers/staging/go7007/wis-tw9903.c b/drivers/staging/go7007/wis-tw9903.c
index d8e41968022e..40627b282cb4 100644
--- a/drivers/staging/go7007/wis-tw9903.c
+++ b/drivers/staging/go7007/wis-tw9903.c
@@ -17,7 +17,6 @@
17 17
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/version.h>
21#include <linux/i2c.h> 20#include <linux/i2c.h>
22#include <linux/videodev2.h> 21#include <linux/videodev2.h>
23#include <linux/ioctl.h> 22#include <linux/ioctl.h>
diff --git a/drivers/staging/go7007/wis-uda1342.c b/drivers/staging/go7007/wis-uda1342.c
index a0894e3cb8c7..555645c0cc1a 100644
--- a/drivers/staging/go7007/wis-uda1342.c
+++ b/drivers/staging/go7007/wis-uda1342.c
@@ -17,7 +17,6 @@
17 17
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/version.h>
21#include <linux/i2c.h> 20#include <linux/i2c.h>
22#include <linux/videodev2.h> 21#include <linux/videodev2.h>
23#include <media/tvaudio.h> 22#include <media/tvaudio.h>
diff --git a/drivers/staging/me4000/me4000.c b/drivers/staging/me4000/me4000.c
index 862dd7ffb5c0..0b33773bb4f6 100644
--- a/drivers/staging/me4000/me4000.c
+++ b/drivers/staging/me4000/me4000.c
@@ -25,24 +25,21 @@
25#include <linux/sched.h> 25#include <linux/sched.h>
26#include <linux/interrupt.h> 26#include <linux/interrupt.h>
27#include <linux/pci.h> 27#include <linux/pci.h>
28#include <asm/io.h>
29#include <asm/system.h>
30#include <asm/uaccess.h>
31#include <linux/errno.h> 28#include <linux/errno.h>
32#include <linux/delay.h> 29#include <linux/delay.h>
33#include <linux/fs.h>
34#include <linux/mm.h> 30#include <linux/mm.h>
35#include <linux/unistd.h> 31#include <linux/unistd.h>
36#include <linux/list.h> 32#include <linux/list.h>
37#include <linux/proc_fs.h> 33#include <linux/proc_fs.h>
38 34#include <linux/types.h>
39#include <linux/poll.h> 35#include <linux/poll.h>
40#include <linux/vmalloc.h> 36#include <linux/vmalloc.h>
37#include <linux/slab.h>
41#include <asm/pgtable.h> 38#include <asm/pgtable.h>
42#include <asm/uaccess.h> 39#include <asm/uaccess.h>
43#include <linux/types.h> 40#include <asm/io.h>
44 41#include <asm/system.h>
45#include <linux/slab.h> 42#include <asm/uaccess.h>
46 43
47/* Include-File for the Meilhaus ME-4000 I/O board */ 44/* Include-File for the Meilhaus ME-4000 I/O board */
48#include "me4000.h" 45#include "me4000.h"
@@ -57,14 +54,14 @@ MODULE_SUPPORTED_DEVICE("Meilhaus ME-4000 Multi I/O boards");
57MODULE_LICENSE("GPL"); 54MODULE_LICENSE("GPL");
58 55
59/* Board specific data are kept in a global list */ 56/* Board specific data are kept in a global list */
60LIST_HEAD(me4000_board_info_list); 57static LIST_HEAD(me4000_board_info_list);
61 58
62/* Major Device Numbers. 0 means to get it automatically from the System */ 59/* Major Device Numbers. 0 means to get it automatically from the System */
63static int me4000_ao_major_driver_no = 0; 60static int me4000_ao_major_driver_no;
64static int me4000_ai_major_driver_no = 0; 61static int me4000_ai_major_driver_no;
65static int me4000_dio_major_driver_no = 0; 62static int me4000_dio_major_driver_no;
66static int me4000_cnt_major_driver_no = 0; 63static int me4000_cnt_major_driver_no;
67static int me4000_ext_int_major_driver_no = 0; 64static int me4000_ext_int_major_driver_no;
68 65
69/* Let the user specify a custom major driver number */ 66/* Let the user specify a custom major driver number */
70module_param(me4000_ao_major_driver_no, int, 0); 67module_param(me4000_ao_major_driver_no, int, 0);
@@ -88,36 +85,22 @@ MODULE_PARM_DESC(me4000_ext_int_major_driver_no,
88 "Major driver number for external interrupt (default 0)"); 85 "Major driver number for external interrupt (default 0)");
89 86
90/*----------------------------------------------------------------------------- 87/*-----------------------------------------------------------------------------
91 Module stuff
92 ---------------------------------------------------------------------------*/
93int init_module(void);
94void cleanup_module(void);
95
96/*-----------------------------------------------------------------------------
97 Board detection and initialization 88 Board detection and initialization
98 ---------------------------------------------------------------------------*/ 89 ---------------------------------------------------------------------------*/
99static int me4000_probe(struct pci_dev *dev, const struct pci_device_id *id); 90static int me4000_probe(struct pci_dev *dev, const struct pci_device_id *id);
100static int me4000_xilinx_download(me4000_info_t *); 91static int me4000_xilinx_download(struct me4000_info *);
101static int me4000_reset_board(me4000_info_t *); 92static int me4000_reset_board(struct me4000_info *);
102 93
103static void clear_board_info_list(void); 94static void clear_board_info_list(void);
104static int get_registers(struct pci_dev *dev, me4000_info_t * info); 95static void release_ao_contexts(struct me4000_info *board_info);
105static int init_board_info(struct pci_dev *dev, me4000_info_t * board_info);
106static int alloc_ao_contexts(me4000_info_t * info);
107static void release_ao_contexts(me4000_info_t * board_info);
108static int alloc_ai_context(me4000_info_t * info);
109static int alloc_dio_context(me4000_info_t * info);
110static int alloc_cnt_context(me4000_info_t * info);
111static int alloc_ext_int_context(me4000_info_t * info);
112
113/*----------------------------------------------------------------------------- 96/*-----------------------------------------------------------------------------
114 Stuff used by all device parts 97 Stuff used by all device parts
115 ---------------------------------------------------------------------------*/ 98 ---------------------------------------------------------------------------*/
116static int me4000_open(struct inode *, struct file *); 99static int me4000_open(struct inode *, struct file *);
117static int me4000_release(struct inode *, struct file *); 100static int me4000_release(struct inode *, struct file *);
118 101
119static int me4000_get_user_info(me4000_user_info_t *, 102static int me4000_get_user_info(struct me4000_user_info *,
120 me4000_info_t * board_info); 103 struct me4000_info *board_info);
121static int me4000_read_procmem(char *, char **, off_t, int, int *, void *); 104static int me4000_read_procmem(char *, char **, off_t, int, int *, void *);
122 105
123/*----------------------------------------------------------------------------- 106/*-----------------------------------------------------------------------------
@@ -140,40 +123,42 @@ static int me4000_ao_ioctl_cont(struct inode *, struct file *, unsigned int,
140static unsigned int me4000_ao_poll_cont(struct file *, poll_table *); 123static unsigned int me4000_ao_poll_cont(struct file *, poll_table *);
141static int me4000_ao_fsync_cont(struct file *, struct dentry *, int); 124static int me4000_ao_fsync_cont(struct file *, struct dentry *, int);
142 125
143static int me4000_ao_start(unsigned long *, me4000_ao_context_t *); 126static int me4000_ao_start(unsigned long *, struct me4000_ao_context *);
144static int me4000_ao_stop(me4000_ao_context_t *); 127static int me4000_ao_stop(struct me4000_ao_context *);
145static int me4000_ao_immediate_stop(me4000_ao_context_t *); 128static int me4000_ao_immediate_stop(struct me4000_ao_context *);
146static int me4000_ao_timer_set_divisor(u32 *, me4000_ao_context_t *); 129static int me4000_ao_timer_set_divisor(u32 *, struct me4000_ao_context *);
147static int me4000_ao_preload(me4000_ao_context_t *); 130static int me4000_ao_preload(struct me4000_ao_context *);
148static int me4000_ao_preload_update(me4000_ao_context_t *); 131static int me4000_ao_preload_update(struct me4000_ao_context *);
149static int me4000_ao_ex_trig_set_edge(int *, me4000_ao_context_t *); 132static int me4000_ao_ex_trig_set_edge(int *, struct me4000_ao_context *);
150static int me4000_ao_ex_trig_enable(me4000_ao_context_t *); 133static int me4000_ao_ex_trig_enable(struct me4000_ao_context *);
151static int me4000_ao_ex_trig_disable(me4000_ao_context_t *); 134static int me4000_ao_ex_trig_disable(struct me4000_ao_context *);
152static int me4000_ao_prepare(me4000_ao_context_t * ao_info); 135static int me4000_ao_prepare(struct me4000_ao_context *ao_info);
153static int me4000_ao_reset(me4000_ao_context_t * ao_info); 136static int me4000_ao_reset(struct me4000_ao_context *ao_info);
154static int me4000_ao_enable_do(me4000_ao_context_t *); 137static int me4000_ao_enable_do(struct me4000_ao_context *);
155static int me4000_ao_disable_do(me4000_ao_context_t *); 138static int me4000_ao_disable_do(struct me4000_ao_context *);
156static int me4000_ao_fsm_state(int *, me4000_ao_context_t *); 139static int me4000_ao_fsm_state(int *, struct me4000_ao_context *);
157 140
158static int me4000_ao_simultaneous_ex_trig(me4000_ao_context_t * ao_context); 141static int me4000_ao_simultaneous_ex_trig(struct me4000_ao_context *ao_context);
159static int me4000_ao_simultaneous_sw(me4000_ao_context_t * ao_context); 142static int me4000_ao_simultaneous_sw(struct me4000_ao_context *ao_context);
160static int me4000_ao_simultaneous_disable(me4000_ao_context_t * ao_context); 143static int me4000_ao_simultaneous_disable(struct me4000_ao_context *ao_context);
161static int me4000_ao_simultaneous_update(me4000_ao_channel_list_t * channels, 144static int me4000_ao_simultaneous_update(
162 me4000_ao_context_t * ao_context); 145 struct me4000_ao_channel_list *channels,
163 146 struct me4000_ao_context *ao_context);
164static int me4000_ao_synchronous_ex_trig(me4000_ao_context_t * ao_context); 147
165static int me4000_ao_synchronous_sw(me4000_ao_context_t * ao_context); 148static int me4000_ao_synchronous_ex_trig(struct me4000_ao_context *ao_context);
166static int me4000_ao_synchronous_disable(me4000_ao_context_t * ao_context); 149static int me4000_ao_synchronous_sw(struct me4000_ao_context *ao_context);
150static int me4000_ao_synchronous_disable(struct me4000_ao_context *ao_context);
167 151
168static int me4000_ao_ex_trig_timeout(unsigned long *arg, 152static int me4000_ao_ex_trig_timeout(unsigned long *arg,
169 me4000_ao_context_t * ao_context); 153 struct me4000_ao_context *ao_context);
170static int me4000_ao_get_free_buffer(unsigned long *arg, 154static int me4000_ao_get_free_buffer(unsigned long *arg,
171 me4000_ao_context_t * ao_context); 155 struct me4000_ao_context *ao_context);
172 156
173/*----------------------------------------------------------------------------- 157/*-----------------------------------------------------------------------------
174 Analog input stuff 158 Analog input stuff
175 ---------------------------------------------------------------------------*/ 159 ---------------------------------------------------------------------------*/
176static int me4000_ai_single(me4000_ai_single_t *, me4000_ai_context_t *); 160static int me4000_ai_single(struct me4000_ai_single *,
161 struct me4000_ai_context *);
177static int me4000_ai_ioctl_sing(struct inode *, struct file *, unsigned int, 162static int me4000_ai_ioctl_sing(struct inode *, struct file *, unsigned int,
178 unsigned long); 163 unsigned long);
179 164
@@ -186,68 +171,69 @@ static int me4000_ai_fasync(int fd, struct file *file_p, int mode);
186static int me4000_ai_ioctl_ext(struct inode *, struct file *, unsigned int, 171static int me4000_ai_ioctl_ext(struct inode *, struct file *, unsigned int,
187 unsigned long); 172 unsigned long);
188 173
189static int me4000_ai_prepare(me4000_ai_context_t * ai_context); 174static int me4000_ai_prepare(struct me4000_ai_context *ai_context);
190static int me4000_ai_reset(me4000_ai_context_t * ai_context); 175static int me4000_ai_reset(struct me4000_ai_context *ai_context);
191static int me4000_ai_config(me4000_ai_config_t *, me4000_ai_context_t *); 176static int me4000_ai_config(struct me4000_ai_config *,
192static int me4000_ai_start(me4000_ai_context_t *); 177 struct me4000_ai_context *);
193static int me4000_ai_start_ex(unsigned long *, me4000_ai_context_t *); 178static int me4000_ai_start(struct me4000_ai_context *);
194static int me4000_ai_stop(me4000_ai_context_t *); 179static int me4000_ai_start_ex(unsigned long *, struct me4000_ai_context *);
195static int me4000_ai_immediate_stop(me4000_ai_context_t *); 180static int me4000_ai_stop(struct me4000_ai_context *);
196static int me4000_ai_ex_trig_enable(me4000_ai_context_t *); 181static int me4000_ai_immediate_stop(struct me4000_ai_context *);
197static int me4000_ai_ex_trig_disable(me4000_ai_context_t *); 182static int me4000_ai_ex_trig_enable(struct me4000_ai_context *);
198static int me4000_ai_ex_trig_setup(me4000_ai_trigger_t *, 183static int me4000_ai_ex_trig_disable(struct me4000_ai_context *);
199 me4000_ai_context_t *); 184static int me4000_ai_ex_trig_setup(struct me4000_ai_trigger *,
200static int me4000_ai_sc_setup(me4000_ai_sc_t * arg, 185 struct me4000_ai_context *);
201 me4000_ai_context_t * ai_context); 186static int me4000_ai_sc_setup(struct me4000_ai_sc *arg,
202static int me4000_ai_offset_enable(me4000_ai_context_t * ai_context); 187 struct me4000_ai_context *ai_context);
203static int me4000_ai_offset_disable(me4000_ai_context_t * ai_context); 188static int me4000_ai_offset_enable(struct me4000_ai_context *ai_context);
204static int me4000_ai_fullscale_enable(me4000_ai_context_t * ai_context); 189static int me4000_ai_offset_disable(struct me4000_ai_context *ai_context);
205static int me4000_ai_fullscale_disable(me4000_ai_context_t * ai_context); 190static int me4000_ai_fullscale_enable(struct me4000_ai_context *ai_context);
206static int me4000_ai_fsm_state(int *arg, me4000_ai_context_t * ai_context); 191static int me4000_ai_fullscale_disable(struct me4000_ai_context *ai_context);
192static int me4000_ai_fsm_state(int *arg, struct me4000_ai_context *ai_context);
207static int me4000_ai_get_count_buffer(unsigned long *arg, 193static int me4000_ai_get_count_buffer(unsigned long *arg,
208 me4000_ai_context_t * ai_context); 194 struct me4000_ai_context *ai_context);
209 195
210/*----------------------------------------------------------------------------- 196/*-----------------------------------------------------------------------------
211 EEPROM stuff 197 EEPROM stuff
212 ---------------------------------------------------------------------------*/ 198 ---------------------------------------------------------------------------*/
213static int me4000_eeprom_read(me4000_eeprom_t * arg, 199static int me4000_eeprom_read(struct me4000_eeprom *arg,
214 me4000_ai_context_t * ai_context); 200 struct me4000_ai_context *ai_context);
215static int me4000_eeprom_write(me4000_eeprom_t * arg, 201static int me4000_eeprom_write(struct me4000_eeprom *arg,
216 me4000_ai_context_t * ai_context); 202 struct me4000_ai_context *ai_context);
217static unsigned short eeprom_read_cmd(me4000_ai_context_t * ai_context,
218 unsigned long cmd, int length);
219static int eeprom_write_cmd(me4000_ai_context_t * ai_context, unsigned long cmd,
220 int length);
221 203
222/*----------------------------------------------------------------------------- 204/*-----------------------------------------------------------------------------
223 Digital I/O stuff 205 Digital I/O stuff
224 ---------------------------------------------------------------------------*/ 206 ---------------------------------------------------------------------------*/
225static int me4000_dio_ioctl(struct inode *, struct file *, unsigned int, 207static int me4000_dio_ioctl(struct inode *, struct file *, unsigned int,
226 unsigned long); 208 unsigned long);
227static int me4000_dio_config(me4000_dio_config_t *, me4000_dio_context_t *); 209static int me4000_dio_config(struct me4000_dio_config *,
228static int me4000_dio_get_byte(me4000_dio_byte_t *, me4000_dio_context_t *); 210 struct me4000_dio_context *);
229static int me4000_dio_set_byte(me4000_dio_byte_t *, me4000_dio_context_t *); 211static int me4000_dio_get_byte(struct me4000_dio_byte *,
230static int me4000_dio_reset(me4000_dio_context_t *); 212 struct me4000_dio_context *);
213static int me4000_dio_set_byte(struct me4000_dio_byte *,
214 struct me4000_dio_context *);
215static int me4000_dio_reset(struct me4000_dio_context *);
231 216
232/*----------------------------------------------------------------------------- 217/*-----------------------------------------------------------------------------
233 Counter stuff 218 Counter stuff
234 ---------------------------------------------------------------------------*/ 219 ---------------------------------------------------------------------------*/
235static int me4000_cnt_ioctl(struct inode *, struct file *, unsigned int, 220static int me4000_cnt_ioctl(struct inode *, struct file *, unsigned int,
236 unsigned long); 221 unsigned long);
237static int me4000_cnt_config(me4000_cnt_config_t *, me4000_cnt_context_t *); 222static int me4000_cnt_config(struct me4000_cnt_config *,
238static int me4000_cnt_read(me4000_cnt_t *, me4000_cnt_context_t *); 223 struct me4000_cnt_context *);
239static int me4000_cnt_write(me4000_cnt_t *, me4000_cnt_context_t *); 224static int me4000_cnt_read(struct me4000_cnt *, struct me4000_cnt_context *);
240static int me4000_cnt_reset(me4000_cnt_context_t *); 225static int me4000_cnt_write(struct me4000_cnt *, struct me4000_cnt_context *);
226static int me4000_cnt_reset(struct me4000_cnt_context *);
241 227
242/*----------------------------------------------------------------------------- 228/*-----------------------------------------------------------------------------
243 External interrupt routines 229 External interrupt routines
244 ---------------------------------------------------------------------------*/ 230 ---------------------------------------------------------------------------*/
245static int me4000_ext_int_ioctl(struct inode *, struct file *, unsigned int, 231static int me4000_ext_int_ioctl(struct inode *, struct file *, unsigned int,
246 unsigned long); 232 unsigned long);
247static int me4000_ext_int_enable(me4000_ext_int_context_t *); 233static int me4000_ext_int_enable(struct me4000_ext_int_context *);
248static int me4000_ext_int_disable(me4000_ext_int_context_t *); 234static int me4000_ext_int_disable(struct me4000_ext_int_context *);
249static int me4000_ext_int_count(unsigned long *arg, 235static int me4000_ext_int_count(unsigned long *arg,
250 me4000_ext_int_context_t * ext_int_context); 236 struct me4000_ext_int_context *ext_int_context);
251static int me4000_ext_int_fasync(int fd, struct file *file_ptr, int mode); 237static int me4000_ext_int_fasync(int fd, struct file *file_ptr, int mode);
252 238
253/*----------------------------------------------------------------------------- 239/*-----------------------------------------------------------------------------
@@ -260,27 +246,18 @@ static irqreturn_t me4000_ext_int_isr(int, void *);
260/*----------------------------------------------------------------------------- 246/*-----------------------------------------------------------------------------
261 Inline functions 247 Inline functions
262 ---------------------------------------------------------------------------*/ 248 ---------------------------------------------------------------------------*/
263static int inline me4000_buf_count(me4000_circ_buf_t, int);
264static int inline me4000_buf_space(me4000_circ_buf_t, int);
265static int inline me4000_space_to_end(me4000_circ_buf_t, int);
266static int inline me4000_values_to_end(me4000_circ_buf_t, int);
267
268static void inline me4000_outb(unsigned char value, unsigned long port);
269static void inline me4000_outl(unsigned long value, unsigned long port);
270static unsigned long inline me4000_inl(unsigned long port);
271static unsigned char inline me4000_inb(unsigned long port);
272 249
273static int me4000_buf_count(me4000_circ_buf_t buf, int size) 250static int inline me4000_buf_count(struct me4000_circ_buf buf, int size)
274{ 251{
275 return ((buf.head - buf.tail) & (size - 1)); 252 return ((buf.head - buf.tail) & (size - 1));
276} 253}
277 254
278static int me4000_buf_space(me4000_circ_buf_t buf, int size) 255static int inline me4000_buf_space(struct me4000_circ_buf buf, int size)
279{ 256{
280 return ((buf.tail - (buf.head + 1)) & (size - 1)); 257 return ((buf.tail - (buf.head + 1)) & (size - 1));
281} 258}
282 259
283static int me4000_values_to_end(me4000_circ_buf_t buf, int size) 260static int inline me4000_values_to_end(struct me4000_circ_buf buf, int size)
284{ 261{
285 int end; 262 int end;
286 int n; 263 int n;
@@ -289,7 +266,7 @@ static int me4000_values_to_end(me4000_circ_buf_t buf, int size)
289 return (n < end) ? n : end; 266 return (n < end) ? n : end;
290} 267}
291 268
292static int me4000_space_to_end(me4000_circ_buf_t buf, int size) 269static int inline me4000_space_to_end(struct me4000_circ_buf buf, int size)
293{ 270{
294 int end; 271 int end;
295 int n; 272 int n;
@@ -299,19 +276,19 @@ static int me4000_space_to_end(me4000_circ_buf_t buf, int size)
299 return (n <= end) ? n : (end + 1); 276 return (n <= end) ? n : (end + 1);
300} 277}
301 278
302static void me4000_outb(unsigned char value, unsigned long port) 279static void inline me4000_outb(unsigned char value, unsigned long port)
303{ 280{
304 PORT_PDEBUG("--> 0x%02X port 0x%04lX\n", value, port); 281 PORT_PDEBUG("--> 0x%02X port 0x%04lX\n", value, port);
305 outb(value, port); 282 outb(value, port);
306} 283}
307 284
308static void me4000_outl(unsigned long value, unsigned long port) 285static void inline me4000_outl(unsigned long value, unsigned long port)
309{ 286{
310 PORT_PDEBUG("--> 0x%08lX port 0x%04lX\n", value, port); 287 PORT_PDEBUG("--> 0x%08lX port 0x%04lX\n", value, port);
311 outl(value, port); 288 outl(value, port);
312} 289}
313 290
314static unsigned long me4000_inl(unsigned long port) 291static unsigned long inline me4000_inl(unsigned long port)
315{ 292{
316 unsigned long value; 293 unsigned long value;
317 value = inl(port); 294 value = inl(port);
@@ -319,7 +296,7 @@ static unsigned long me4000_inl(unsigned long port)
319 return value; 296 return value;
320} 297}
321 298
322static unsigned char me4000_inb(unsigned long port) 299static unsigned char inline me4000_inb(unsigned long port)
323{ 300{
324 unsigned char value; 301 unsigned char value;
325 value = inb(port); 302 value = inb(port);
@@ -327,102 +304,102 @@ static unsigned char me4000_inb(unsigned long port)
327 return value; 304 return value;
328} 305}
329 306
330struct pci_driver me4000_driver = { 307static struct pci_driver me4000_driver = {
331 .name = ME4000_NAME, 308 .name = ME4000_NAME,
332 .id_table = me4000_pci_table, 309 .id_table = me4000_pci_table,
333 .probe = me4000_probe 310 .probe = me4000_probe
334}; 311};
335 312
336static struct file_operations me4000_ao_fops_sing = { 313static struct file_operations me4000_ao_fops_sing = {
337 owner:THIS_MODULE, 314 .owner = THIS_MODULE,
338 write:me4000_ao_write_sing, 315 .write = me4000_ao_write_sing,
339 ioctl:me4000_ao_ioctl_sing, 316 .ioctl = me4000_ao_ioctl_sing,
340 open:me4000_open, 317 .open = me4000_open,
341 release:me4000_release, 318 .release = me4000_release,
342}; 319};
343 320
344static struct file_operations me4000_ao_fops_wrap = { 321static struct file_operations me4000_ao_fops_wrap = {
345 owner:THIS_MODULE, 322 .owner = THIS_MODULE,
346 write:me4000_ao_write_wrap, 323 .write = me4000_ao_write_wrap,
347 ioctl:me4000_ao_ioctl_wrap, 324 .ioctl = me4000_ao_ioctl_wrap,
348 open:me4000_open, 325 .open = me4000_open,
349 release:me4000_release, 326 .release = me4000_release,
350}; 327};
351 328
352static struct file_operations me4000_ao_fops_cont = { 329static struct file_operations me4000_ao_fops_cont = {
353 owner:THIS_MODULE, 330 .owner = THIS_MODULE,
354 write:me4000_ao_write_cont, 331 .write = me4000_ao_write_cont,
355 poll:me4000_ao_poll_cont, 332 .poll = me4000_ao_poll_cont,
356 ioctl:me4000_ao_ioctl_cont, 333 .ioctl = me4000_ao_ioctl_cont,
357 open:me4000_open, 334 .open = me4000_open,
358 release:me4000_release, 335 .release = me4000_release,
359 fsync:me4000_ao_fsync_cont, 336 .fsync = me4000_ao_fsync_cont,
360}; 337};
361 338
362static struct file_operations me4000_ai_fops_sing = { 339static struct file_operations me4000_ai_fops_sing = {
363 owner:THIS_MODULE, 340 .owner = THIS_MODULE,
364 ioctl:me4000_ai_ioctl_sing, 341 .ioctl = me4000_ai_ioctl_sing,
365 open:me4000_open, 342 .open = me4000_open,
366 release:me4000_release, 343 .release = me4000_release,
367}; 344};
368 345
369static struct file_operations me4000_ai_fops_cont_sw = { 346static struct file_operations me4000_ai_fops_cont_sw = {
370 owner:THIS_MODULE, 347 .owner = THIS_MODULE,
371 read:me4000_ai_read, 348 .read = me4000_ai_read,
372 poll:me4000_ai_poll, 349 .poll = me4000_ai_poll,
373 ioctl:me4000_ai_ioctl_sw, 350 .ioctl = me4000_ai_ioctl_sw,
374 open:me4000_open, 351 .open = me4000_open,
375 release:me4000_release, 352 .release = me4000_release,
376 fasync:me4000_ai_fasync, 353 .fasync = me4000_ai_fasync,
377}; 354};
378 355
379static struct file_operations me4000_ai_fops_cont_et = { 356static struct file_operations me4000_ai_fops_cont_et = {
380 owner:THIS_MODULE, 357 .owner = THIS_MODULE,
381 read:me4000_ai_read, 358 .read = me4000_ai_read,
382 poll:me4000_ai_poll, 359 .poll = me4000_ai_poll,
383 ioctl:me4000_ai_ioctl_ext, 360 .ioctl = me4000_ai_ioctl_ext,
384 open:me4000_open, 361 .open = me4000_open,
385 release:me4000_release, 362 .release = me4000_release,
386}; 363};
387 364
388static struct file_operations me4000_ai_fops_cont_et_value = { 365static struct file_operations me4000_ai_fops_cont_et_value = {
389 owner:THIS_MODULE, 366 .owner = THIS_MODULE,
390 read:me4000_ai_read, 367 .read = me4000_ai_read,
391 poll:me4000_ai_poll, 368 .poll = me4000_ai_poll,
392 ioctl:me4000_ai_ioctl_ext, 369 .ioctl = me4000_ai_ioctl_ext,
393 open:me4000_open, 370 .open = me4000_open,
394 release:me4000_release, 371 .release = me4000_release,
395}; 372};
396 373
397static struct file_operations me4000_ai_fops_cont_et_chanlist = { 374static struct file_operations me4000_ai_fops_cont_et_chanlist = {
398 owner:THIS_MODULE, 375 .owner = THIS_MODULE,
399 read:me4000_ai_read, 376 .read = me4000_ai_read,
400 poll:me4000_ai_poll, 377 .poll = me4000_ai_poll,
401 ioctl:me4000_ai_ioctl_ext, 378 .ioctl = me4000_ai_ioctl_ext,
402 open:me4000_open, 379 .open = me4000_open,
403 release:me4000_release, 380 .release = me4000_release,
404}; 381};
405 382
406static struct file_operations me4000_dio_fops = { 383static struct file_operations me4000_dio_fops = {
407 owner:THIS_MODULE, 384 .owner = THIS_MODULE,
408 ioctl:me4000_dio_ioctl, 385 .ioctl = me4000_dio_ioctl,
409 open:me4000_open, 386 .open = me4000_open,
410 release:me4000_release, 387 .release = me4000_release,
411}; 388};
412 389
413static struct file_operations me4000_cnt_fops = { 390static struct file_operations me4000_cnt_fops = {
414 owner:THIS_MODULE, 391 .owner = THIS_MODULE,
415 ioctl:me4000_cnt_ioctl, 392 .ioctl = me4000_cnt_ioctl,
416 open:me4000_open, 393 .open = me4000_open,
417 release:me4000_release, 394 .release = me4000_release,
418}; 395};
419 396
420static struct file_operations me4000_ext_int_fops = { 397static struct file_operations me4000_ext_int_fops = {
421 owner:THIS_MODULE, 398 .owner = THIS_MODULE,
422 ioctl:me4000_ext_int_ioctl, 399 .ioctl = me4000_ext_int_ioctl,
423 open:me4000_open, 400 .open = me4000_open,
424 release:me4000_release, 401 .release = me4000_release,
425 fasync:me4000_ext_int_fasync, 402 .fasync = me4000_ext_int_fasync,
426}; 403};
427 404
428static struct file_operations *me4000_ao_fops_array[] = { 405static struct file_operations *me4000_ao_fops_array[] = {
@@ -439,9 +416,9 @@ static struct file_operations *me4000_ai_fops_array[] = {
439 &me4000_ai_fops_cont_et_chanlist, // work through one channel list by external trigger 416 &me4000_ai_fops_cont_et_chanlist, // work through one channel list by external trigger
440}; 417};
441 418
442int __init me4000_init_module(void) 419static int __init me4000_init_module(void)
443{ 420{
444 int result = 0; 421 int result;
445 422
446 CALL_PDEBUG("init_module() is executed\n"); 423 CALL_PDEBUG("init_module() is executed\n");
447 424
@@ -533,26 +510,26 @@ int __init me4000_init_module(void)
533 510
534 return 0; 511 return 0;
535 512
536 INIT_ERROR_7: 513INIT_ERROR_7:
537 unregister_chrdev(me4000_ext_int_major_driver_no, ME4000_EXT_INT_NAME); 514 unregister_chrdev(me4000_ext_int_major_driver_no, ME4000_EXT_INT_NAME);
538 515
539 INIT_ERROR_6: 516INIT_ERROR_6:
540 unregister_chrdev(me4000_cnt_major_driver_no, ME4000_CNT_NAME); 517 unregister_chrdev(me4000_cnt_major_driver_no, ME4000_CNT_NAME);
541 518
542 INIT_ERROR_5: 519INIT_ERROR_5:
543 unregister_chrdev(me4000_dio_major_driver_no, ME4000_DIO_NAME); 520 unregister_chrdev(me4000_dio_major_driver_no, ME4000_DIO_NAME);
544 521
545 INIT_ERROR_4: 522INIT_ERROR_4:
546 unregister_chrdev(me4000_ai_major_driver_no, ME4000_AI_NAME); 523 unregister_chrdev(me4000_ai_major_driver_no, ME4000_AI_NAME);
547 524
548 INIT_ERROR_3: 525INIT_ERROR_3:
549 unregister_chrdev(me4000_ao_major_driver_no, ME4000_AO_NAME); 526 unregister_chrdev(me4000_ao_major_driver_no, ME4000_AO_NAME);
550 527
551 INIT_ERROR_2: 528INIT_ERROR_2:
552 pci_unregister_driver(&me4000_driver); 529 pci_unregister_driver(&me4000_driver);
553 clear_board_info_list(); 530 clear_board_info_list();
554 531
555 INIT_ERROR_1: 532INIT_ERROR_1:
556 return result; 533 return result;
557} 534}
558 535
@@ -562,18 +539,18 @@ static void clear_board_info_list(void)
562{ 539{
563 struct list_head *board_p; 540 struct list_head *board_p;
564 struct list_head *dac_p; 541 struct list_head *dac_p;
565 me4000_info_t *board_info; 542 struct me4000_info *board_info;
566 me4000_ao_context_t *ao_context; 543 struct me4000_ao_context *ao_context;
567 544
568 /* Clear context lists */ 545 /* Clear context lists */
569 for (board_p = me4000_board_info_list.next; 546 for (board_p = me4000_board_info_list.next;
570 board_p != &me4000_board_info_list; board_p = board_p->next) { 547 board_p != &me4000_board_info_list; board_p = board_p->next) {
571 board_info = list_entry(board_p, me4000_info_t, list); 548 board_info = list_entry(board_p, struct me4000_info, list);
572 /* Clear analog output context list */ 549 /* Clear analog output context list */
573 while (!list_empty(&board_info->ao_context_list)) { 550 while (!list_empty(&board_info->ao_context_list)) {
574 dac_p = board_info->ao_context_list.next; 551 dac_p = board_info->ao_context_list.next;
575 ao_context = 552 ao_context =
576 list_entry(dac_p, me4000_ao_context_t, list); 553 list_entry(dac_p, struct me4000_ao_context, list);
577 me4000_ao_reset(ao_context); 554 me4000_ao_reset(ao_context);
578 free_irq(ao_context->irq, ao_context); 555 free_irq(ao_context->irq, ao_context);
579 if (ao_context->circ_buf.buf) 556 if (ao_context->circ_buf.buf)
@@ -600,14 +577,14 @@ static void clear_board_info_list(void)
600 /* Clear the board info list */ 577 /* Clear the board info list */
601 while (!list_empty(&me4000_board_info_list)) { 578 while (!list_empty(&me4000_board_info_list)) {
602 board_p = me4000_board_info_list.next; 579 board_p = me4000_board_info_list.next;
603 board_info = list_entry(board_p, me4000_info_t, list); 580 board_info = list_entry(board_p, struct me4000_info, list);
604 pci_release_regions(board_info->pci_dev_p); 581 pci_release_regions(board_info->pci_dev_p);
605 list_del(board_p); 582 list_del(board_p);
606 kfree(board_info); 583 kfree(board_info);
607 } 584 }
608} 585}
609 586
610static int get_registers(struct pci_dev *dev, me4000_info_t * board_info) 587static int get_registers(struct pci_dev *dev, struct me4000_info *board_info)
611{ 588{
612 589
613 /*--------------------------- plx regbase ---------------------------------*/ 590 /*--------------------------- plx regbase ---------------------------------*/
@@ -667,20 +644,20 @@ static int get_registers(struct pci_dev *dev, me4000_info_t * board_info)
667} 644}
668 645
669static int init_board_info(struct pci_dev *pci_dev_p, 646static int init_board_info(struct pci_dev *pci_dev_p,
670 me4000_info_t * board_info) 647 struct me4000_info *board_info)
671{ 648{
672 int i; 649 int i;
673 int result; 650 int result;
674 struct list_head *board_p; 651 struct list_head *board_p;
675 board_info->pci_dev_p = pci_dev_p; 652 board_info->pci_dev_p = pci_dev_p;
676 653
677 for (i = 0; i < ME4000_BOARD_VERSIONS; i++) { 654 for (i = 0; i < ARRAY_SIZE(me4000_boards); i++) {
678 if (me4000_boards[i].device_id == pci_dev_p->device) { 655 if (me4000_boards[i].device_id == pci_dev_p->device) {
679 board_info->board_p = &me4000_boards[i]; 656 board_info->board_p = &me4000_boards[i];
680 break; 657 break;
681 } 658 }
682 } 659 }
683 if (i == ME4000_BOARD_VERSIONS) { 660 if (i == ARRAY_SIZE(me4000_boards)) {
684 printk(KERN_ERR 661 printk(KERN_ERR
685 "ME4000:init_board_info():Device ID not valid\n"); 662 "ME4000:init_board_info():Device ID not valid\n");
686 return -ENODEV; 663 return -ENODEV;
@@ -755,21 +732,21 @@ static int init_board_info(struct pci_dev *pci_dev_p,
755 return 0; 732 return 0;
756} 733}
757 734
758static int alloc_ao_contexts(me4000_info_t * info) 735static int alloc_ao_contexts(struct me4000_info *info)
759{ 736{
760 int i; 737 int i;
761 int err; 738 int err;
762 me4000_ao_context_t *ao_context; 739 struct me4000_ao_context *ao_context;
763 740
764 for (i = 0; i < info->board_p->ao.count; i++) { 741 for (i = 0; i < info->board_p->ao.count; i++) {
765 ao_context = kmalloc(sizeof(me4000_ao_context_t), GFP_KERNEL); 742 ao_context = kzalloc(sizeof(struct me4000_ao_context),
743 GFP_KERNEL);
766 if (!ao_context) { 744 if (!ao_context) {
767 printk(KERN_ERR 745 printk(KERN_ERR
768 "alloc_ao_contexts():Can't get memory for ao context\n"); 746 "alloc_ao_contexts():Can't get memory for ao context\n");
769 release_ao_contexts(info); 747 release_ao_contexts(info);
770 return -ENOMEM; 748 return -ENOMEM;
771 } 749 }
772 memset(ao_context, 0, sizeof(me4000_ao_context_t));
773 750
774 spin_lock_init(&ao_context->use_lock); 751 spin_lock_init(&ao_context->use_lock);
775 spin_lock_init(&ao_context->int_lock); 752 spin_lock_init(&ao_context->int_lock);
@@ -780,15 +757,13 @@ static int alloc_ao_contexts(me4000_info_t * info)
780 if (info->board_p->ao.fifo_count) { 757 if (info->board_p->ao.fifo_count) {
781 /* Allocate circular buffer */ 758 /* Allocate circular buffer */
782 ao_context->circ_buf.buf = 759 ao_context->circ_buf.buf =
783 kmalloc(ME4000_AO_BUFFER_SIZE, GFP_KERNEL); 760 kzalloc(ME4000_AO_BUFFER_SIZE, GFP_KERNEL);
784 if (!ao_context->circ_buf.buf) { 761 if (!ao_context->circ_buf.buf) {
785 printk(KERN_ERR 762 printk(KERN_ERR
786 "alloc_ao_contexts():Can't get circular buffer\n"); 763 "alloc_ao_contexts():Can't get circular buffer\n");
787 release_ao_contexts(info); 764 release_ao_contexts(info);
788 return -ENOMEM; 765 return -ENOMEM;
789 } 766 }
790 memset(ao_context->circ_buf.buf, 0,
791 ME4000_AO_BUFFER_SIZE);
792 767
793 /* Clear the circular buffer */ 768 /* Clear the circular buffer */
794 ao_context->circ_buf.head = 0; 769 ao_context->circ_buf.head = 0;
@@ -872,9 +847,8 @@ static int alloc_ao_contexts(me4000_info_t * info)
872 ME4000_NAME, ao_context); 847 ME4000_NAME, ao_context);
873 if (err) { 848 if (err) {
874 printk(KERN_ERR 849 printk(KERN_ERR
875 "alloc_ao_contexts():Can't get interrupt line"); 850 "%s:Can't get interrupt line", __func__);
876 if (ao_context->circ_buf.buf) 851 kfree(ao_context->circ_buf.buf);
877 kfree(ao_context->circ_buf.buf);
878 kfree(ao_context); 852 kfree(ao_context);
879 release_ao_contexts(info); 853 release_ao_contexts(info);
880 return -ENODEV; 854 return -ENODEV;
@@ -888,35 +862,34 @@ static int alloc_ao_contexts(me4000_info_t * info)
888 return 0; 862 return 0;
889} 863}
890 864
891static void release_ao_contexts(me4000_info_t * board_info) 865static void release_ao_contexts(struct me4000_info *board_info)
892{ 866{
893 struct list_head *dac_p; 867 struct list_head *dac_p;
894 me4000_ao_context_t *ao_context; 868 struct me4000_ao_context *ao_context;
895 869
896 /* Clear analog output context list */ 870 /* Clear analog output context list */
897 while (!list_empty(&board_info->ao_context_list)) { 871 while (!list_empty(&board_info->ao_context_list)) {
898 dac_p = board_info->ao_context_list.next; 872 dac_p = board_info->ao_context_list.next;
899 ao_context = list_entry(dac_p, me4000_ao_context_t, list); 873 ao_context = list_entry(dac_p, struct me4000_ao_context, list);
900 free_irq(ao_context->irq, ao_context); 874 free_irq(ao_context->irq, ao_context);
901 if (ao_context->circ_buf.buf) 875 kfree(ao_context->circ_buf.buf);
902 kfree(ao_context->circ_buf.buf);
903 list_del(dac_p); 876 list_del(dac_p);
904 kfree(ao_context); 877 kfree(ao_context);
905 } 878 }
906} 879}
907 880
908static int alloc_ai_context(me4000_info_t * info) 881static int alloc_ai_context(struct me4000_info *info)
909{ 882{
910 me4000_ai_context_t *ai_context; 883 struct me4000_ai_context *ai_context;
911 884
912 if (info->board_p->ai.count) { 885 if (info->board_p->ai.count) {
913 ai_context = kmalloc(sizeof(me4000_ai_context_t), GFP_KERNEL); 886 ai_context = kzalloc(sizeof(struct me4000_ai_context),
887 GFP_KERNEL);
914 if (!ai_context) { 888 if (!ai_context) {
915 printk(KERN_ERR 889 printk(KERN_ERR
916 "ME4000:alloc_ai_context():Can't get memory for ai context\n"); 890 "ME4000:alloc_ai_context():Can't get memory for ai context\n");
917 return -ENOMEM; 891 return -ENOMEM;
918 } 892 }
919 memset(ai_context, 0, sizeof(me4000_ai_context_t));
920 893
921 info->ai_context = ai_context; 894 info->ai_context = ai_context;
922 895
@@ -958,18 +931,18 @@ static int alloc_ai_context(me4000_info_t * info)
958 return 0; 931 return 0;
959} 932}
960 933
961static int alloc_dio_context(me4000_info_t * info) 934static int alloc_dio_context(struct me4000_info *info)
962{ 935{
963 me4000_dio_context_t *dio_context; 936 struct me4000_dio_context *dio_context;
964 937
965 if (info->board_p->dio.count) { 938 if (info->board_p->dio.count) {
966 dio_context = kmalloc(sizeof(me4000_dio_context_t), GFP_KERNEL); 939 dio_context = kzalloc(sizeof(struct me4000_dio_context),
940 GFP_KERNEL);
967 if (!dio_context) { 941 if (!dio_context) {
968 printk(KERN_ERR 942 printk(KERN_ERR
969 "ME4000:alloc_dio_context():Can't get memory for dio context\n"); 943 "ME4000:alloc_dio_context():Can't get memory for dio context\n");
970 return -ENOMEM; 944 return -ENOMEM;
971 } 945 }
972 memset(dio_context, 0, sizeof(me4000_dio_context_t));
973 946
974 info->dio_context = dio_context; 947 info->dio_context = dio_context;
975 948
@@ -995,18 +968,18 @@ static int alloc_dio_context(me4000_info_t * info)
995 return 0; 968 return 0;
996} 969}
997 970
998static int alloc_cnt_context(me4000_info_t * info) 971static int alloc_cnt_context(struct me4000_info *info)
999{ 972{
1000 me4000_cnt_context_t *cnt_context; 973 struct me4000_cnt_context *cnt_context;
1001 974
1002 if (info->board_p->cnt.count) { 975 if (info->board_p->cnt.count) {
1003 cnt_context = kmalloc(sizeof(me4000_cnt_context_t), GFP_KERNEL); 976 cnt_context = kzalloc(sizeof(struct me4000_cnt_context),
977 GFP_KERNEL);
1004 if (!cnt_context) { 978 if (!cnt_context) {
1005 printk(KERN_ERR 979 printk(KERN_ERR
1006 "ME4000:alloc_cnt_context():Can't get memory for cnt context\n"); 980 "ME4000:alloc_cnt_context():Can't get memory for cnt context\n");
1007 return -ENOMEM; 981 return -ENOMEM;
1008 } 982 }
1009 memset(cnt_context, 0, sizeof(me4000_cnt_context_t));
1010 983
1011 info->cnt_context = cnt_context; 984 info->cnt_context = cnt_context;
1012 985
@@ -1026,19 +999,18 @@ static int alloc_cnt_context(me4000_info_t * info)
1026 return 0; 999 return 0;
1027} 1000}
1028 1001
1029static int alloc_ext_int_context(me4000_info_t * info) 1002static int alloc_ext_int_context(struct me4000_info *info)
1030{ 1003{
1031 me4000_ext_int_context_t *ext_int_context; 1004 struct me4000_ext_int_context *ext_int_context;
1032 1005
1033 if (info->board_p->cnt.count) { 1006 if (info->board_p->cnt.count) {
1034 ext_int_context = 1007 ext_int_context =
1035 kmalloc(sizeof(me4000_ext_int_context_t), GFP_KERNEL); 1008 kzalloc(sizeof(struct me4000_ext_int_context), GFP_KERNEL);
1036 if (!ext_int_context) { 1009 if (!ext_int_context) {
1037 printk(KERN_ERR 1010 printk(KERN_ERR
1038 "ME4000:alloc_ext_int_context():Can't get memory for cnt context\n"); 1011 "ME4000:alloc_ext_int_context():Can't get memory for cnt context\n");
1039 return -ENOMEM; 1012 return -ENOMEM;
1040 } 1013 }
1041 memset(ext_int_context, 0, sizeof(me4000_ext_int_context_t));
1042 1014
1043 info->ext_int_context = ext_int_context; 1015 info->ext_int_context = ext_int_context;
1044 1016
@@ -1060,19 +1032,18 @@ static int alloc_ext_int_context(me4000_info_t * info)
1060static int me4000_probe(struct pci_dev *dev, const struct pci_device_id *id) 1032static int me4000_probe(struct pci_dev *dev, const struct pci_device_id *id)
1061{ 1033{
1062 int result = 0; 1034 int result = 0;
1063 me4000_info_t *board_info; 1035 struct me4000_info *board_info;
1064 1036
1065 CALL_PDEBUG("me4000_probe() is executed\n"); 1037 CALL_PDEBUG("me4000_probe() is executed\n");
1066 1038
1067 /* Allocate structure for board context */ 1039 /* Allocate structure for board context */
1068 board_info = kmalloc(sizeof(me4000_info_t), GFP_KERNEL); 1040 board_info = kzalloc(sizeof(struct me4000_info), GFP_KERNEL);
1069 if (!board_info) { 1041 if (!board_info) {
1070 printk(KERN_ERR 1042 printk(KERN_ERR
1071 "ME4000:Can't get memory for board info structure\n"); 1043 "ME4000:Can't get memory for board info structure\n");
1072 result = -ENOMEM; 1044 result = -ENOMEM;
1073 goto PROBE_ERROR_1; 1045 goto PROBE_ERROR_1;
1074 } 1046 }
1075 memset(board_info, 0, sizeof(me4000_info_t));
1076 1047
1077 /* Add to global linked list */ 1048 /* Add to global linked list */
1078 list_add_tail(&board_info->list, &me4000_board_info_list); 1049 list_add_tail(&board_info->list, &me4000_board_info_list);
@@ -1080,70 +1051,70 @@ static int me4000_probe(struct pci_dev *dev, const struct pci_device_id *id)
1080 /* Get the PCI base registers */ 1051 /* Get the PCI base registers */
1081 result = get_registers(dev, board_info); 1052 result = get_registers(dev, board_info);
1082 if (result) { 1053 if (result) {
1083 printk(KERN_ERR "me4000_probe():Cannot get registers\n"); 1054 printk(KERN_ERR "%s:Cannot get registers\n", __func__);
1084 goto PROBE_ERROR_2; 1055 goto PROBE_ERROR_2;
1085 } 1056 }
1086 1057
1087 /* Enable the device */ 1058 /* Enable the device */
1088 result = pci_enable_device(dev); 1059 result = pci_enable_device(dev);
1089 if (result < 0) { 1060 if (result < 0) {
1090 printk(KERN_ERR "me4000_probe():Cannot enable PCI device\n"); 1061 printk(KERN_ERR "%s:Cannot enable PCI device\n", __func__);
1091 goto PROBE_ERROR_2; 1062 goto PROBE_ERROR_2;
1092 } 1063 }
1093 1064
1094 /* Request the PCI register regions */ 1065 /* Request the PCI register regions */
1095 result = pci_request_regions(dev, ME4000_NAME); 1066 result = pci_request_regions(dev, ME4000_NAME);
1096 if (result < 0) { 1067 if (result < 0) {
1097 printk(KERN_ERR "me4000_probe():Cannot request I/O regions\n"); 1068 printk(KERN_ERR "%s:Cannot request I/O regions\n", __func__);
1098 goto PROBE_ERROR_2; 1069 goto PROBE_ERROR_2;
1099 } 1070 }
1100 1071
1101 /* Initialize board info */ 1072 /* Initialize board info */
1102 result = init_board_info(dev, board_info); 1073 result = init_board_info(dev, board_info);
1103 if (result) { 1074 if (result) {
1104 printk(KERN_ERR "me4000_probe():Cannot init baord info\n"); 1075 printk(KERN_ERR "%s:Cannot init baord info\n", __func__);
1105 goto PROBE_ERROR_3; 1076 goto PROBE_ERROR_3;
1106 } 1077 }
1107 1078
1108 /* Download the xilinx firmware */ 1079 /* Download the xilinx firmware */
1109 result = me4000_xilinx_download(board_info); 1080 result = me4000_xilinx_download(board_info);
1110 if (result) { 1081 if (result) {
1111 printk(KERN_ERR "me4000_probe:Can't download firmware\n"); 1082 printk(KERN_ERR "%s:Can't download firmware\n", __func__);
1112 goto PROBE_ERROR_3; 1083 goto PROBE_ERROR_3;
1113 } 1084 }
1114 1085
1115 /* Make a hardware reset */ 1086 /* Make a hardware reset */
1116 result = me4000_reset_board(board_info); 1087 result = me4000_reset_board(board_info);
1117 if (result) { 1088 if (result) {
1118 printk(KERN_ERR "me4000_probe:Can't reset board\n"); 1089 printk(KERN_ERR "%s :Can't reset board\n", __func__);
1119 goto PROBE_ERROR_3; 1090 goto PROBE_ERROR_3;
1120 } 1091 }
1121 1092
1122 /* Allocate analog output context structures */ 1093 /* Allocate analog output context structures */
1123 result = alloc_ao_contexts(board_info); 1094 result = alloc_ao_contexts(board_info);
1124 if (result) { 1095 if (result) {
1125 printk(KERN_ERR "me4000_probe():Cannot allocate ao contexts\n"); 1096 printk(KERN_ERR "%s:Cannot allocate ao contexts\n", __func__);
1126 goto PROBE_ERROR_3; 1097 goto PROBE_ERROR_3;
1127 } 1098 }
1128 1099
1129 /* Allocate analog input context */ 1100 /* Allocate analog input context */
1130 result = alloc_ai_context(board_info); 1101 result = alloc_ai_context(board_info);
1131 if (result) { 1102 if (result) {
1132 printk(KERN_ERR "me4000_probe():Cannot allocate ai context\n"); 1103 printk(KERN_ERR "%s:Cannot allocate ai context\n", __func__);
1133 goto PROBE_ERROR_4; 1104 goto PROBE_ERROR_4;
1134 } 1105 }
1135 1106
1136 /* Allocate digital I/O context */ 1107 /* Allocate digital I/O context */
1137 result = alloc_dio_context(board_info); 1108 result = alloc_dio_context(board_info);
1138 if (result) { 1109 if (result) {
1139 printk(KERN_ERR "me4000_probe():Cannot allocate dio context\n"); 1110 printk(KERN_ERR "%s:Cannot allocate dio context\n", __func__);
1140 goto PROBE_ERROR_5; 1111 goto PROBE_ERROR_5;
1141 } 1112 }
1142 1113
1143 /* Allocate counter context */ 1114 /* Allocate counter context */
1144 result = alloc_cnt_context(board_info); 1115 result = alloc_cnt_context(board_info);
1145 if (result) { 1116 if (result) {
1146 printk(KERN_ERR "me4000_probe():Cannot allocate cnt context\n"); 1117 printk(KERN_ERR "%s:Cannot allocate cnt context\n", __func__);
1147 goto PROBE_ERROR_6; 1118 goto PROBE_ERROR_6;
1148 } 1119 }
1149 1120
@@ -1151,36 +1122,36 @@ static int me4000_probe(struct pci_dev *dev, const struct pci_device_id *id)
1151 result = alloc_ext_int_context(board_info); 1122 result = alloc_ext_int_context(board_info);
1152 if (result) { 1123 if (result) {
1153 printk(KERN_ERR 1124 printk(KERN_ERR
1154 "me4000_probe():Cannot allocate ext_int context\n"); 1125 "%s:Cannot allocate ext_int context\n", __func__);
1155 goto PROBE_ERROR_7; 1126 goto PROBE_ERROR_7;
1156 } 1127 }
1157 1128
1158 return 0; 1129 return 0;
1159 1130
1160 PROBE_ERROR_7: 1131PROBE_ERROR_7:
1161 kfree(board_info->cnt_context); 1132 kfree(board_info->cnt_context);
1162 1133
1163 PROBE_ERROR_6: 1134PROBE_ERROR_6:
1164 kfree(board_info->dio_context); 1135 kfree(board_info->dio_context);
1165 1136
1166 PROBE_ERROR_5: 1137PROBE_ERROR_5:
1167 kfree(board_info->ai_context); 1138 kfree(board_info->ai_context);
1168 1139
1169 PROBE_ERROR_4: 1140PROBE_ERROR_4:
1170 release_ao_contexts(board_info); 1141 release_ao_contexts(board_info);
1171 1142
1172 PROBE_ERROR_3: 1143PROBE_ERROR_3:
1173 pci_release_regions(dev); 1144 pci_release_regions(dev);
1174 1145
1175 PROBE_ERROR_2: 1146PROBE_ERROR_2:
1176 list_del(&board_info->list); 1147 list_del(&board_info->list);
1177 kfree(board_info); 1148 kfree(board_info);
1178 1149
1179 PROBE_ERROR_1: 1150PROBE_ERROR_1:
1180 return result; 1151 return result;
1181} 1152}
1182 1153
1183static int me4000_xilinx_download(me4000_info_t * info) 1154static int me4000_xilinx_download(struct me4000_info *info)
1184{ 1155{
1185 int size = 0; 1156 int size = 0;
1186 u32 value = 0; 1157 u32 value = 0;
@@ -1211,7 +1182,7 @@ static int me4000_xilinx_download(me4000_info_t * info)
1211 /* Wait until /INIT pin is set */ 1182 /* Wait until /INIT pin is set */
1212 udelay(20); 1183 udelay(20);
1213 if (!inl(info->plx_regbase + PLX_INTCSR) & 0x20) { 1184 if (!inl(info->plx_regbase + PLX_INTCSR) & 0x20) {
1214 printk(KERN_ERR "me4000_xilinx_download():Can't init Xilinx\n"); 1185 printk(KERN_ERR "%s:Can't init Xilinx\n", __func__);
1215 return -EIO; 1186 return -EIO;
1216 } 1187 }
1217 1188
@@ -1232,7 +1203,7 @@ static int me4000_xilinx_download(me4000_info_t * info)
1232 /* Check if BUSY flag is low */ 1203 /* Check if BUSY flag is low */
1233 if (inl(info->plx_regbase + PLX_ICR) & 0x20) { 1204 if (inl(info->plx_regbase + PLX_ICR) & 0x20) {
1234 printk(KERN_ERR 1205 printk(KERN_ERR
1235 "me4000_xilinx_download():Xilinx is still busy (idx = %d)\n", 1206 "%s:Xilinx is still busy (idx = %d)\n", __func__,
1236 idx); 1207 idx);
1237 return -EIO; 1208 return -EIO;
1238 } 1209 }
@@ -1246,9 +1217,9 @@ static int me4000_xilinx_download(me4000_info_t * info)
1246 PDEBUG("me4000_xilinx_download():Download was successful\n"); 1217 PDEBUG("me4000_xilinx_download():Download was successful\n");
1247 } else { 1218 } else {
1248 printk(KERN_ERR 1219 printk(KERN_ERR
1249 "ME4000:me4000_xilinx_download():DONE flag is not set\n"); 1220 "ME4000:%s:DONE flag is not set\n", __func__);
1250 printk(KERN_ERR 1221 printk(KERN_ERR
1251 "ME4000:me4000_xilinx_download():Download not succesful\n"); 1222 "ME4000:%s:Download not succesful\n", __func__);
1252 return -EIO; 1223 return -EIO;
1253 } 1224 }
1254 1225
@@ -1260,7 +1231,7 @@ static int me4000_xilinx_download(me4000_info_t * info)
1260 return 0; 1231 return 0;
1261} 1232}
1262 1233
1263static int me4000_reset_board(me4000_info_t * info) 1234static int me4000_reset_board(struct me4000_info *info)
1264{ 1235{
1265 unsigned long icr; 1236 unsigned long icr;
1266 1237
@@ -1314,12 +1285,12 @@ static int me4000_open(struct inode *inode_p, struct file *file_p)
1314 int err = 0; 1285 int err = 0;
1315 int i; 1286 int i;
1316 struct list_head *ptr; 1287 struct list_head *ptr;
1317 me4000_info_t *board_info = NULL; 1288 struct me4000_info *board_info = NULL;
1318 me4000_ao_context_t *ao_context = NULL; 1289 struct me4000_ao_context *ao_context = NULL;
1319 me4000_ai_context_t *ai_context = NULL; 1290 struct me4000_ai_context *ai_context = NULL;
1320 me4000_dio_context_t *dio_context = NULL; 1291 struct me4000_dio_context *dio_context = NULL;
1321 me4000_cnt_context_t *cnt_context = NULL; 1292 struct me4000_cnt_context *cnt_context = NULL;
1322 me4000_ext_int_context_t *ext_int_context = NULL; 1293 struct me4000_ext_int_context *ext_int_context = NULL;
1323 1294
1324 CALL_PDEBUG("me4000_open() is executed\n"); 1295 CALL_PDEBUG("me4000_open() is executed\n");
1325 1296
@@ -1335,7 +1306,7 @@ static int me4000_open(struct inode *inode_p, struct file *file_p)
1335 /* Search for the board context */ 1306 /* Search for the board context */
1336 for (ptr = me4000_board_info_list.next, i = 0; 1307 for (ptr = me4000_board_info_list.next, i = 0;
1337 ptr != &me4000_board_info_list; ptr = ptr->next, i++) { 1308 ptr != &me4000_board_info_list; ptr = ptr->next, i++) {
1338 board_info = list_entry(ptr, me4000_info_t, list); 1309 board_info = list_entry(ptr, struct me4000_info, list);
1339 if (i == board) 1310 if (i == board)
1340 break; 1311 break;
1341 } 1312 }
@@ -1351,7 +1322,8 @@ static int me4000_open(struct inode *inode_p, struct file *file_p)
1351 for (ptr = board_info->ao_context_list.next, i = 0; 1322 for (ptr = board_info->ao_context_list.next, i = 0;
1352 ptr != &board_info->ao_context_list; 1323 ptr != &board_info->ao_context_list;
1353 ptr = ptr->next, i++) { 1324 ptr = ptr->next, i++) {
1354 ao_context = list_entry(ptr, me4000_ao_context_t, list); 1325 ao_context = list_entry(ptr, struct me4000_ao_context,
1326 list);
1355 if (i == dev) 1327 if (i == dev)
1356 break; 1328 break;
1357 } 1329 }
@@ -1415,7 +1387,7 @@ static int me4000_open(struct inode *inode_p, struct file *file_p)
1415 /* Search for the board context */ 1387 /* Search for the board context */
1416 for (ptr = me4000_board_info_list.next, i = 0; 1388 for (ptr = me4000_board_info_list.next, i = 0;
1417 ptr != &me4000_board_info_list; ptr = ptr->next, i++) { 1389 ptr != &me4000_board_info_list; ptr = ptr->next, i++) {
1418 board_info = list_entry(ptr, me4000_info_t, list); 1390 board_info = list_entry(ptr, struct me4000_info, list);
1419 if (i == board) 1391 if (i == board)
1420 break; 1392 break;
1421 } 1393 }
@@ -1469,7 +1441,7 @@ static int me4000_open(struct inode *inode_p, struct file *file_p)
1469 /* Search for the board context */ 1441 /* Search for the board context */
1470 for (ptr = me4000_board_info_list.next; 1442 for (ptr = me4000_board_info_list.next;
1471 ptr != &me4000_board_info_list; ptr = ptr->next) { 1443 ptr != &me4000_board_info_list; ptr = ptr->next) {
1472 board_info = list_entry(ptr, me4000_info_t, list); 1444 board_info = list_entry(ptr, struct me4000_info, list);
1473 if (board_info->board_count == board) 1445 if (board_info->board_count == board)
1474 break; 1446 break;
1475 } 1447 }
@@ -1514,7 +1486,7 @@ static int me4000_open(struct inode *inode_p, struct file *file_p)
1514 /* Search for the board context */ 1486 /* Search for the board context */
1515 for (ptr = me4000_board_info_list.next; 1487 for (ptr = me4000_board_info_list.next;
1516 ptr != &me4000_board_info_list; ptr = ptr->next) { 1488 ptr != &me4000_board_info_list; ptr = ptr->next) {
1517 board_info = list_entry(ptr, me4000_info_t, list); 1489 board_info = list_entry(ptr, struct me4000_info, list);
1518 if (board_info->board_count == board) 1490 if (board_info->board_count == board)
1519 break; 1491 break;
1520 } 1492 }
@@ -1557,7 +1529,7 @@ static int me4000_open(struct inode *inode_p, struct file *file_p)
1557 /* Search for the board context */ 1529 /* Search for the board context */
1558 for (ptr = me4000_board_info_list.next; 1530 for (ptr = me4000_board_info_list.next;
1559 ptr != &me4000_board_info_list; ptr = ptr->next) { 1531 ptr != &me4000_board_info_list; ptr = ptr->next) {
1560 board_info = list_entry(ptr, me4000_info_t, list); 1532 board_info = list_entry(ptr, struct me4000_info, list);
1561 if (board_info->board_count == board) 1533 if (board_info->board_count == board)
1562 break; 1534 break;
1563 } 1535 }
@@ -1613,11 +1585,11 @@ static int me4000_open(struct inode *inode_p, struct file *file_p)
1613 1585
1614static int me4000_release(struct inode *inode_p, struct file *file_p) 1586static int me4000_release(struct inode *inode_p, struct file *file_p)
1615{ 1587{
1616 me4000_ao_context_t *ao_context; 1588 struct me4000_ao_context *ao_context;
1617 me4000_ai_context_t *ai_context; 1589 struct me4000_ai_context *ai_context;
1618 me4000_dio_context_t *dio_context; 1590 struct me4000_dio_context *dio_context;
1619 me4000_cnt_context_t *cnt_context; 1591 struct me4000_cnt_context *cnt_context;
1620 me4000_ext_int_context_t *ext_int_context; 1592 struct me4000_ext_int_context *ext_int_context;
1621 1593
1622 CALL_PDEBUG("me4000_release() is executed\n"); 1594 CALL_PDEBUG("me4000_release() is executed\n");
1623 1595
@@ -1677,7 +1649,7 @@ static int me4000_release(struct inode *inode_p, struct file *file_p)
1677 1649
1678/*------------------------------- Analog output stuff --------------------------------------*/ 1650/*------------------------------- Analog output stuff --------------------------------------*/
1679 1651
1680static int me4000_ao_prepare(me4000_ao_context_t * ao_context) 1652static int me4000_ao_prepare(struct me4000_ao_context *ao_context)
1681{ 1653{
1682 unsigned long flags; 1654 unsigned long flags;
1683 1655
@@ -1756,7 +1728,7 @@ static int me4000_ao_prepare(me4000_ao_context_t * ao_context)
1756 return 0; 1728 return 0;
1757} 1729}
1758 1730
1759static int me4000_ao_reset(me4000_ao_context_t * ao_context) 1731static int me4000_ao_reset(struct me4000_ao_context *ao_context)
1760{ 1732{
1761 u32 tmp; 1733 u32 tmp;
1762 wait_queue_head_t queue; 1734 wait_queue_head_t queue;
@@ -1777,9 +1749,10 @@ static int me4000_ao_reset(me4000_ao_context_t * ao_context)
1777 tmp |= ME4000_AO_CTRL_BIT_IMMEDIATE_STOP; 1749 tmp |= ME4000_AO_CTRL_BIT_IMMEDIATE_STOP;
1778 me4000_outl(tmp, ao_context->ctrl_reg); 1750 me4000_outl(tmp, ao_context->ctrl_reg);
1779 1751
1780 while (inl(ao_context->status_reg) & ME4000_AO_STATUS_BIT_FSM) { 1752 wait_event_timeout(queue,
1781 sleep_on_timeout(&queue, 1); 1753 (inl(ao_context->status_reg) &
1782 } 1754 ME4000_AO_STATUS_BIT_FSM) == 0,
1755 1);
1783 1756
1784 /* Set to transparent mode */ 1757 /* Set to transparent mode */
1785 me4000_ao_simultaneous_disable(ao_context); 1758 me4000_ao_simultaneous_disable(ao_context);
@@ -1812,9 +1785,10 @@ static int me4000_ao_reset(me4000_ao_context_t * ao_context)
1812 me4000_outl(tmp, ao_context->ctrl_reg); 1785 me4000_outl(tmp, ao_context->ctrl_reg);
1813 spin_unlock_irqrestore(&ao_context->int_lock, flags); 1786 spin_unlock_irqrestore(&ao_context->int_lock, flags);
1814 1787
1815 while (inl(ao_context->status_reg) & ME4000_AO_STATUS_BIT_FSM) { 1788 wait_event_timeout(queue,
1816 sleep_on_timeout(&queue, 1); 1789 (inl(ao_context->status_reg) &
1817 } 1790 ME4000_AO_STATUS_BIT_FSM) == 0,
1791 1);
1818 1792
1819 /* Clear the circular buffer */ 1793 /* Clear the circular buffer */
1820 ao_context->circ_buf.head = 0; 1794 ao_context->circ_buf.head = 0;
@@ -1853,9 +1827,9 @@ static int me4000_ao_reset(me4000_ao_context_t * ao_context)
1853} 1827}
1854 1828
1855static ssize_t me4000_ao_write_sing(struct file *filep, const char *buff, 1829static ssize_t me4000_ao_write_sing(struct file *filep, const char *buff,
1856 size_t cnt, loff_t * offp) 1830 size_t cnt, loff_t *offp)
1857{ 1831{
1858 me4000_ao_context_t *ao_context = filep->private_data; 1832 struct me4000_ao_context *ao_context = filep->private_data;
1859 u32 value; 1833 u32 value;
1860 const u16 *buffer = (const u16 *)buff; 1834 const u16 *buffer = (const u16 *)buff;
1861 1835
@@ -1863,13 +1837,13 @@ static ssize_t me4000_ao_write_sing(struct file *filep, const char *buff,
1863 1837
1864 if (cnt != 2) { 1838 if (cnt != 2) {
1865 printk(KERN_ERR 1839 printk(KERN_ERR
1866 "me4000_ao_write_sing():Write count is not 2\n"); 1840 "%s:Write count is not 2\n", __func__);
1867 return -EINVAL; 1841 return -EINVAL;
1868 } 1842 }
1869 1843
1870 if (get_user(value, buffer)) { 1844 if (get_user(value, buffer)) {
1871 printk(KERN_ERR 1845 printk(KERN_ERR
1872 "me4000_ao_write_sing():Cannot copy data from user\n"); 1846 "%s:Cannot copy data from user\n", __func__);
1873 return -EFAULT; 1847 return -EFAULT;
1874 } 1848 }
1875 1849
@@ -1879,9 +1853,9 @@ static ssize_t me4000_ao_write_sing(struct file *filep, const char *buff,
1879} 1853}
1880 1854
1881static ssize_t me4000_ao_write_wrap(struct file *filep, const char *buff, 1855static ssize_t me4000_ao_write_wrap(struct file *filep, const char *buff,
1882 size_t cnt, loff_t * offp) 1856 size_t cnt, loff_t *offp)
1883{ 1857{
1884 me4000_ao_context_t *ao_context = filep->private_data; 1858 struct me4000_ao_context *ao_context = filep->private_data;
1885 size_t i; 1859 size_t i;
1886 u32 value; 1860 u32 value;
1887 u32 tmp; 1861 u32 tmp;
@@ -1893,13 +1867,13 @@ static ssize_t me4000_ao_write_wrap(struct file *filep, const char *buff,
1893 /* Check if a conversion is already running */ 1867 /* Check if a conversion is already running */
1894 if (inl(ao_context->status_reg) & ME4000_AO_STATUS_BIT_FSM) { 1868 if (inl(ao_context->status_reg) & ME4000_AO_STATUS_BIT_FSM) {
1895 printk(KERN_ERR 1869 printk(KERN_ERR
1896 "ME4000:me4000_ao_write_wrap():There is already a conversion running\n"); 1870 "%s:There is already a conversion running\n", __func__);
1897 return -EBUSY; 1871 return -EBUSY;
1898 } 1872 }
1899 1873
1900 if (count > ME4000_AO_FIFO_COUNT) { 1874 if (count > ME4000_AO_FIFO_COUNT) {
1901 printk(KERN_ERR 1875 printk(KERN_ERR
1902 "me4000_ao_write_wrap():Can't load more than %d values\n", 1876 "%s:Can't load more than %d values\n", __func__,
1903 ME4000_AO_FIFO_COUNT); 1877 ME4000_AO_FIFO_COUNT);
1904 return -ENOSPC; 1878 return -ENOSPC;
1905 } 1879 }
@@ -1914,7 +1888,7 @@ static ssize_t me4000_ao_write_wrap(struct file *filep, const char *buff,
1914 for (i = 0; i < count; i++) { 1888 for (i = 0; i < count; i++) {
1915 if (get_user(value, buffer + i)) { 1889 if (get_user(value, buffer + i)) {
1916 printk(KERN_ERR 1890 printk(KERN_ERR
1917 "me4000_ao_write_single():Cannot copy data from user\n"); 1891 "%s:Cannot copy data from user\n", __func__);
1918 return -EFAULT; 1892 return -EFAULT;
1919 } 1893 }
1920 if (((ao_context->fifo_reg & 0xFF) == ME4000_AO_01_FIFO_REG) 1894 if (((ao_context->fifo_reg & 0xFF) == ME4000_AO_01_FIFO_REG)
@@ -1928,9 +1902,9 @@ static ssize_t me4000_ao_write_wrap(struct file *filep, const char *buff,
1928} 1902}
1929 1903
1930static ssize_t me4000_ao_write_cont(struct file *filep, const char *buff, 1904static ssize_t me4000_ao_write_cont(struct file *filep, const char *buff,
1931 size_t cnt, loff_t * offp) 1905 size_t cnt, loff_t *offp)
1932{ 1906{
1933 me4000_ao_context_t *ao_context = filep->private_data; 1907 struct me4000_ao_context *ao_context = filep->private_data;
1934 const u16 *buffer = (const u16 *)buff; 1908 const u16 *buffer = (const u16 *)buff;
1935 size_t count = cnt / 2; 1909 size_t count = cnt / 2;
1936 unsigned long flags; 1910 unsigned long flags;
@@ -2154,9 +2128,9 @@ static ssize_t me4000_ao_write_cont(struct file *filep, const char *buff,
2154 return 2 * ret; 2128 return 2 * ret;
2155} 2129}
2156 2130
2157static unsigned int me4000_ao_poll_cont(struct file *file_p, poll_table * wait) 2131static unsigned int me4000_ao_poll_cont(struct file *file_p, poll_table *wait)
2158{ 2132{
2159 me4000_ao_context_t *ao_context; 2133 struct me4000_ao_context *ao_context;
2160 unsigned long mask = 0; 2134 unsigned long mask = 0;
2161 2135
2162 CALL_PDEBUG("me4000_ao_poll_cont() is executed\n"); 2136 CALL_PDEBUG("me4000_ao_poll_cont() is executed\n");
@@ -2177,7 +2151,7 @@ static unsigned int me4000_ao_poll_cont(struct file *file_p, poll_table * wait)
2177static int me4000_ao_fsync_cont(struct file *file_p, struct dentry *dentry_p, 2151static int me4000_ao_fsync_cont(struct file *file_p, struct dentry *dentry_p,
2178 int datasync) 2152 int datasync)
2179{ 2153{
2180 me4000_ao_context_t *ao_context; 2154 struct me4000_ao_context *ao_context;
2181 wait_queue_head_t queue; 2155 wait_queue_head_t queue;
2182 2156
2183 CALL_PDEBUG("me4000_ao_fsync_cont() is executed\n"); 2157 CALL_PDEBUG("me4000_ao_fsync_cont() is executed\n");
@@ -2187,15 +2161,19 @@ static int me4000_ao_fsync_cont(struct file *file_p, struct dentry *dentry_p,
2187 2161
2188 while (inl(ao_context->status_reg) & ME4000_AO_STATUS_BIT_FSM) { 2162 while (inl(ao_context->status_reg) & ME4000_AO_STATUS_BIT_FSM) {
2189 interruptible_sleep_on_timeout(&queue, 1); 2163 interruptible_sleep_on_timeout(&queue, 1);
2164 wait_event_interruptible_timeout(queue,
2165 !(inl(ao_context->status_reg) & ME4000_AO_STATUS_BIT_FSM),
2166 1);
2190 if (ao_context->pipe_flag) { 2167 if (ao_context->pipe_flag) {
2191 printk(KERN_ERR 2168 printk(KERN_ERR
2192 "me4000_ao_fsync_cont():Broken pipe detected\n"); 2169 "%s:Broken pipe detected\n", __func__);
2193 return -EPIPE; 2170 return -EPIPE;
2194 } 2171 }
2195 2172
2196 if (signal_pending(current)) { 2173 if (signal_pending(current)) {
2197 printk(KERN_ERR 2174 printk(KERN_ERR
2198 "me4000_ao_fsync_cont():Wait on state machine interrupted\n"); 2175 "%s:Wait on state machine interrupted\n",
2176 __func__);
2199 return -EINTR; 2177 return -EINTR;
2200 } 2178 }
2201 } 2179 }
@@ -2206,7 +2184,7 @@ static int me4000_ao_fsync_cont(struct file *file_p, struct dentry *dentry_p,
2206static int me4000_ao_ioctl_sing(struct inode *inode_p, struct file *file_p, 2184static int me4000_ao_ioctl_sing(struct inode *inode_p, struct file *file_p,
2207 unsigned int service, unsigned long arg) 2185 unsigned int service, unsigned long arg)
2208{ 2186{
2209 me4000_ao_context_t *ao_context; 2187 struct me4000_ao_context *ao_context;
2210 2188
2211 CALL_PDEBUG("me4000_ao_ioctl_sing() is executed\n"); 2189 CALL_PDEBUG("me4000_ao_ioctl_sing() is executed\n");
2212 2190
@@ -2229,7 +2207,7 @@ static int me4000_ao_ioctl_sing(struct inode *inode_p, struct file *file_p,
2229 case ME4000_AO_PRELOAD_UPDATE: 2207 case ME4000_AO_PRELOAD_UPDATE:
2230 return me4000_ao_preload_update(ao_context); 2208 return me4000_ao_preload_update(ao_context);
2231 case ME4000_GET_USER_INFO: 2209 case ME4000_GET_USER_INFO:
2232 return me4000_get_user_info((me4000_user_info_t *) arg, 2210 return me4000_get_user_info((struct me4000_user_info *)arg,
2233 ao_context->board_info); 2211 ao_context->board_info);
2234 case ME4000_AO_SIMULTANEOUS_EX_TRIG: 2212 case ME4000_AO_SIMULTANEOUS_EX_TRIG:
2235 return me4000_ao_simultaneous_ex_trig(ao_context); 2213 return me4000_ao_simultaneous_ex_trig(ao_context);
@@ -2239,8 +2217,9 @@ static int me4000_ao_ioctl_sing(struct inode *inode_p, struct file *file_p,
2239 return me4000_ao_simultaneous_disable(ao_context); 2217 return me4000_ao_simultaneous_disable(ao_context);
2240 case ME4000_AO_SIMULTANEOUS_UPDATE: 2218 case ME4000_AO_SIMULTANEOUS_UPDATE:
2241 return 2219 return
2242 me4000_ao_simultaneous_update((me4000_ao_channel_list_t *) 2220 me4000_ao_simultaneous_update(
2243 arg, ao_context); 2221 (struct me4000_ao_channel_list *)arg,
2222 ao_context);
2244 case ME4000_AO_EX_TRIG_TIMEOUT: 2223 case ME4000_AO_EX_TRIG_TIMEOUT:
2245 return me4000_ao_ex_trig_timeout((unsigned long *)arg, 2224 return me4000_ao_ex_trig_timeout((unsigned long *)arg,
2246 ao_context); 2225 ao_context);
@@ -2258,7 +2237,7 @@ static int me4000_ao_ioctl_sing(struct inode *inode_p, struct file *file_p,
2258static int me4000_ao_ioctl_wrap(struct inode *inode_p, struct file *file_p, 2237static int me4000_ao_ioctl_wrap(struct inode *inode_p, struct file *file_p,
2259 unsigned int service, unsigned long arg) 2238 unsigned int service, unsigned long arg)
2260{ 2239{
2261 me4000_ao_context_t *ao_context; 2240 struct me4000_ao_context *ao_context;
2262 2241
2263 CALL_PDEBUG("me4000_ao_ioctl_wrap() is executed\n"); 2242 CALL_PDEBUG("me4000_ao_ioctl_wrap() is executed\n");
2264 2243
@@ -2287,7 +2266,7 @@ static int me4000_ao_ioctl_wrap(struct inode *inode_p, struct file *file_p,
2287 case ME4000_AO_EX_TRIG_DISABLE: 2266 case ME4000_AO_EX_TRIG_DISABLE:
2288 return me4000_ao_ex_trig_disable(ao_context); 2267 return me4000_ao_ex_trig_disable(ao_context);
2289 case ME4000_GET_USER_INFO: 2268 case ME4000_GET_USER_INFO:
2290 return me4000_get_user_info((me4000_user_info_t *) arg, 2269 return me4000_get_user_info((struct me4000_user_info *)arg,
2291 ao_context->board_info); 2270 ao_context->board_info);
2292 case ME4000_AO_FSM_STATE: 2271 case ME4000_AO_FSM_STATE:
2293 return me4000_ao_fsm_state((int *)arg, ao_context); 2272 return me4000_ao_fsm_state((int *)arg, ao_context);
@@ -2310,7 +2289,7 @@ static int me4000_ao_ioctl_wrap(struct inode *inode_p, struct file *file_p,
2310static int me4000_ao_ioctl_cont(struct inode *inode_p, struct file *file_p, 2289static int me4000_ao_ioctl_cont(struct inode *inode_p, struct file *file_p,
2311 unsigned int service, unsigned long arg) 2290 unsigned int service, unsigned long arg)
2312{ 2291{
2313 me4000_ao_context_t *ao_context; 2292 struct me4000_ao_context *ao_context;
2314 2293
2315 CALL_PDEBUG("me4000_ao_ioctl_cont() is executed\n"); 2294 CALL_PDEBUG("me4000_ao_ioctl_cont() is executed\n");
2316 2295
@@ -2345,7 +2324,7 @@ static int me4000_ao_ioctl_cont(struct inode *inode_p, struct file *file_p,
2345 case ME4000_AO_FSM_STATE: 2324 case ME4000_AO_FSM_STATE:
2346 return me4000_ao_fsm_state((int *)arg, ao_context); 2325 return me4000_ao_fsm_state((int *)arg, ao_context);
2347 case ME4000_GET_USER_INFO: 2326 case ME4000_GET_USER_INFO:
2348 return me4000_get_user_info((me4000_user_info_t *) arg, 2327 return me4000_get_user_info((struct me4000_user_info *)arg,
2349 ao_context->board_info); 2328 ao_context->board_info);
2350 case ME4000_AO_SYNCHRONOUS_EX_TRIG: 2329 case ME4000_AO_SYNCHRONOUS_EX_TRIG:
2351 return me4000_ao_synchronous_ex_trig(ao_context); 2330 return me4000_ao_synchronous_ex_trig(ao_context);
@@ -2362,7 +2341,8 @@ static int me4000_ao_ioctl_cont(struct inode *inode_p, struct file *file_p,
2362 return 0; 2341 return 0;
2363} 2342}
2364 2343
2365static int me4000_ao_start(unsigned long *arg, me4000_ao_context_t * ao_context) 2344static int me4000_ao_start(unsigned long *arg,
2345 struct me4000_ao_context *ao_context)
2366{ 2346{
2367 u32 tmp; 2347 u32 tmp;
2368 wait_queue_head_t queue; 2348 wait_queue_head_t queue;
@@ -2412,7 +2392,7 @@ static int me4000_ao_start(unsigned long *arg, me4000_ao_context_t * ao_context)
2412 return 0; 2392 return 0;
2413} 2393}
2414 2394
2415static int me4000_ao_stop(me4000_ao_context_t * ao_context) 2395static int me4000_ao_stop(struct me4000_ao_context *ao_context)
2416{ 2396{
2417 u32 tmp; 2397 u32 tmp;
2418 wait_queue_head_t queue; 2398 wait_queue_head_t queue;
@@ -2445,7 +2425,7 @@ static int me4000_ao_stop(me4000_ao_context_t * ao_context)
2445 return 0; 2425 return 0;
2446} 2426}
2447 2427
2448static int me4000_ao_immediate_stop(me4000_ao_context_t * ao_context) 2428static int me4000_ao_immediate_stop(struct me4000_ao_context *ao_context)
2449{ 2429{
2450 u32 tmp; 2430 u32 tmp;
2451 wait_queue_head_t queue; 2431 wait_queue_head_t queue;
@@ -2477,8 +2457,8 @@ static int me4000_ao_immediate_stop(me4000_ao_context_t * ao_context)
2477 return 0; 2457 return 0;
2478} 2458}
2479 2459
2480static int me4000_ao_timer_set_divisor(u32 * arg, 2460static int me4000_ao_timer_set_divisor(u32 *arg,
2481 me4000_ao_context_t * ao_context) 2461 struct me4000_ao_context *ao_context)
2482{ 2462{
2483 u32 divisor; 2463 u32 divisor;
2484 u32 tmp; 2464 u32 tmp;
@@ -2518,7 +2498,7 @@ static int me4000_ao_timer_set_divisor(u32 * arg,
2518} 2498}
2519 2499
2520static int me4000_ao_ex_trig_set_edge(int *arg, 2500static int me4000_ao_ex_trig_set_edge(int *arg,
2521 me4000_ao_context_t * ao_context) 2501 struct me4000_ao_context *ao_context)
2522{ 2502{
2523 int mode; 2503 int mode;
2524 u32 tmp; 2504 u32 tmp;
@@ -2569,7 +2549,7 @@ static int me4000_ao_ex_trig_set_edge(int *arg,
2569 return 0; 2549 return 0;
2570} 2550}
2571 2551
2572static int me4000_ao_ex_trig_enable(me4000_ao_context_t * ao_context) 2552static int me4000_ao_ex_trig_enable(struct me4000_ao_context *ao_context)
2573{ 2553{
2574 u32 tmp; 2554 u32 tmp;
2575 unsigned long flags; 2555 unsigned long flags;
@@ -2593,7 +2573,7 @@ static int me4000_ao_ex_trig_enable(me4000_ao_context_t * ao_context)
2593 return 0; 2573 return 0;
2594} 2574}
2595 2575
2596static int me4000_ao_ex_trig_disable(me4000_ao_context_t * ao_context) 2576static int me4000_ao_ex_trig_disable(struct me4000_ao_context *ao_context)
2597{ 2577{
2598 u32 tmp; 2578 u32 tmp;
2599 unsigned long flags; 2579 unsigned long flags;
@@ -2617,7 +2597,7 @@ static int me4000_ao_ex_trig_disable(me4000_ao_context_t * ao_context)
2617 return 0; 2597 return 0;
2618} 2598}
2619 2599
2620static int me4000_ao_simultaneous_disable(me4000_ao_context_t * ao_context) 2600static int me4000_ao_simultaneous_disable(struct me4000_ao_context *ao_context)
2621{ 2601{
2622 u32 tmp; 2602 u32 tmp;
2623 2603
@@ -2643,7 +2623,7 @@ static int me4000_ao_simultaneous_disable(me4000_ao_context_t * ao_context)
2643 return 0; 2623 return 0;
2644} 2624}
2645 2625
2646static int me4000_ao_simultaneous_ex_trig(me4000_ao_context_t * ao_context) 2626static int me4000_ao_simultaneous_ex_trig(struct me4000_ao_context *ao_context)
2647{ 2627{
2648 u32 tmp; 2628 u32 tmp;
2649 2629
@@ -2659,7 +2639,7 @@ static int me4000_ao_simultaneous_ex_trig(me4000_ao_context_t * ao_context)
2659 return 0; 2639 return 0;
2660} 2640}
2661 2641
2662static int me4000_ao_simultaneous_sw(me4000_ao_context_t * ao_context) 2642static int me4000_ao_simultaneous_sw(struct me4000_ao_context *ao_context)
2663{ 2643{
2664 u32 tmp; 2644 u32 tmp;
2665 2645
@@ -2675,13 +2655,13 @@ static int me4000_ao_simultaneous_sw(me4000_ao_context_t * ao_context)
2675 return 0; 2655 return 0;
2676} 2656}
2677 2657
2678static int me4000_ao_preload(me4000_ao_context_t * ao_context) 2658static int me4000_ao_preload(struct me4000_ao_context *ao_context)
2679{ 2659{
2680 CALL_PDEBUG("me4000_ao_preload() is executed\n"); 2660 CALL_PDEBUG("me4000_ao_preload() is executed\n");
2681 return me4000_ao_simultaneous_sw(ao_context); 2661 return me4000_ao_simultaneous_sw(ao_context);
2682} 2662}
2683 2663
2684static int me4000_ao_preload_update(me4000_ao_context_t * ao_context) 2664static int me4000_ao_preload_update(struct me4000_ao_context *ao_context)
2685{ 2665{
2686 u32 tmp; 2666 u32 tmp;
2687 u32 ctrl; 2667 u32 ctrl;
@@ -2705,10 +2685,12 @@ static int me4000_ao_preload_update(me4000_ao_context_t * ao_context)
2705 if (! 2685 if (!
2706 (tmp & 2686 (tmp &
2707 (0x1 << 2687 (0x1 <<
2708 (((me4000_ao_context_t *) entry)->index + 16)))) { 2688 (((struct me4000_ao_context *)entry)->index
2689 + 16)))) {
2709 tmp &= 2690 tmp &=
2710 ~(0x1 << 2691 ~(0x1 <<
2711 (((me4000_ao_context_t *) entry)->index)); 2692 (((struct me4000_ao_context *)entry)->
2693 index));
2712 } 2694 }
2713 } 2695 }
2714 } 2696 }
@@ -2718,18 +2700,19 @@ static int me4000_ao_preload_update(me4000_ao_context_t * ao_context)
2718 return 0; 2700 return 0;
2719} 2701}
2720 2702
2721static int me4000_ao_simultaneous_update(me4000_ao_channel_list_t * arg, 2703static int me4000_ao_simultaneous_update(struct me4000_ao_channel_list *arg,
2722 me4000_ao_context_t * ao_context) 2704 struct me4000_ao_context *ao_context)
2723{ 2705{
2724 int err; 2706 int err;
2725 int i; 2707 int i;
2726 u32 tmp; 2708 u32 tmp;
2727 me4000_ao_channel_list_t channels; 2709 struct me4000_ao_channel_list channels;
2728 2710
2729 CALL_PDEBUG("me4000_ao_simultaneous_update() is executed\n"); 2711 CALL_PDEBUG("me4000_ao_simultaneous_update() is executed\n");
2730 2712
2731 /* Copy data from user */ 2713 /* Copy data from user */
2732 err = copy_from_user(&channels, arg, sizeof(me4000_ao_channel_list_t)); 2714 err = copy_from_user(&channels, arg,
2715 sizeof(struct me4000_ao_channel_list));
2733 if (err) { 2716 if (err) {
2734 printk(KERN_ERR 2717 printk(KERN_ERR
2735 "ME4000:me4000_ao_simultaneous_update():Can't copy command\n"); 2718 "ME4000:me4000_ao_simultaneous_update():Can't copy command\n");
@@ -2737,13 +2720,12 @@ static int me4000_ao_simultaneous_update(me4000_ao_channel_list_t * arg,
2737 } 2720 }
2738 2721
2739 channels.list = 2722 channels.list =
2740 kmalloc(sizeof(unsigned long) * channels.count, GFP_KERNEL); 2723 kzalloc(sizeof(unsigned long) * channels.count, GFP_KERNEL);
2741 if (!channels.list) { 2724 if (!channels.list) {
2742 printk(KERN_ERR 2725 printk(KERN_ERR
2743 "ME4000:me4000_ao_simultaneous_update():Can't get buffer\n"); 2726 "ME4000:me4000_ao_simultaneous_update():Can't get buffer\n");
2744 return -ENOMEM; 2727 return -ENOMEM;
2745 } 2728 }
2746 memset(channels.list, 0, sizeof(unsigned long) * channels.count);
2747 2729
2748 /* Copy channel list from user */ 2730 /* Copy channel list from user */
2749 err = 2731 err =
@@ -2777,7 +2759,7 @@ static int me4000_ao_simultaneous_update(me4000_ao_channel_list_t * arg,
2777 return 0; 2759 return 0;
2778} 2760}
2779 2761
2780static int me4000_ao_synchronous_ex_trig(me4000_ao_context_t * ao_context) 2762static int me4000_ao_synchronous_ex_trig(struct me4000_ao_context *ao_context)
2781{ 2763{
2782 u32 tmp; 2764 u32 tmp;
2783 unsigned long flags; 2765 unsigned long flags;
@@ -2813,7 +2795,7 @@ static int me4000_ao_synchronous_ex_trig(me4000_ao_context_t * ao_context)
2813 return 0; 2795 return 0;
2814} 2796}
2815 2797
2816static int me4000_ao_synchronous_sw(me4000_ao_context_t * ao_context) 2798static int me4000_ao_synchronous_sw(struct me4000_ao_context *ao_context)
2817{ 2799{
2818 u32 tmp; 2800 u32 tmp;
2819 unsigned long flags; 2801 unsigned long flags;
@@ -2848,13 +2830,13 @@ static int me4000_ao_synchronous_sw(me4000_ao_context_t * ao_context)
2848 return 0; 2830 return 0;
2849} 2831}
2850 2832
2851static int me4000_ao_synchronous_disable(me4000_ao_context_t * ao_context) 2833static int me4000_ao_synchronous_disable(struct me4000_ao_context *ao_context)
2852{ 2834{
2853 return me4000_ao_simultaneous_disable(ao_context); 2835 return me4000_ao_simultaneous_disable(ao_context);
2854} 2836}
2855 2837
2856static int me4000_ao_get_free_buffer(unsigned long *arg, 2838static int me4000_ao_get_free_buffer(unsigned long *arg,
2857 me4000_ao_context_t * ao_context) 2839 struct me4000_ao_context *ao_context)
2858{ 2840{
2859 unsigned long c; 2841 unsigned long c;
2860 int err; 2842 int err;
@@ -2864,7 +2846,7 @@ static int me4000_ao_get_free_buffer(unsigned long *arg,
2864 err = copy_to_user(arg, &c, sizeof(unsigned long)); 2846 err = copy_to_user(arg, &c, sizeof(unsigned long));
2865 if (err) { 2847 if (err) {
2866 printk(KERN_ERR 2848 printk(KERN_ERR
2867 "ME4000:me4000_ao_get_free_buffer():Can't copy to user space\n"); 2849 "%s:Can't copy to user space\n", __func__);
2868 return -EFAULT; 2850 return -EFAULT;
2869 } 2851 }
2870 2852
@@ -2872,7 +2854,7 @@ static int me4000_ao_get_free_buffer(unsigned long *arg,
2872} 2854}
2873 2855
2874static int me4000_ao_ex_trig_timeout(unsigned long *arg, 2856static int me4000_ao_ex_trig_timeout(unsigned long *arg,
2875 me4000_ao_context_t * ao_context) 2857 struct me4000_ao_context *ao_context)
2876{ 2858{
2877 u32 tmp; 2859 u32 tmp;
2878 wait_queue_head_t queue; 2860 wait_queue_head_t queue;
@@ -2928,7 +2910,7 @@ static int me4000_ao_ex_trig_timeout(unsigned long *arg,
2928 return 0; 2910 return 0;
2929} 2911}
2930 2912
2931static int me4000_ao_enable_do(me4000_ao_context_t * ao_context) 2913static int me4000_ao_enable_do(struct me4000_ao_context *ao_context)
2932{ 2914{
2933 u32 tmp; 2915 u32 tmp;
2934 unsigned long flags; 2916 unsigned long flags;
@@ -2959,7 +2941,7 @@ static int me4000_ao_enable_do(me4000_ao_context_t * ao_context)
2959 return 0; 2941 return 0;
2960} 2942}
2961 2943
2962static int me4000_ao_disable_do(me4000_ao_context_t * ao_context) 2944static int me4000_ao_disable_do(struct me4000_ao_context *ao_context)
2963{ 2945{
2964 u32 tmp; 2946 u32 tmp;
2965 unsigned long flags; 2947 unsigned long flags;
@@ -2989,7 +2971,7 @@ static int me4000_ao_disable_do(me4000_ao_context_t * ao_context)
2989 return 0; 2971 return 0;
2990} 2972}
2991 2973
2992static int me4000_ao_fsm_state(int *arg, me4000_ao_context_t * ao_context) 2974static int me4000_ao_fsm_state(int *arg, struct me4000_ao_context *ao_context)
2993{ 2975{
2994 unsigned long tmp; 2976 unsigned long tmp;
2995 2977
@@ -3012,9 +2994,9 @@ static int me4000_ao_fsm_state(int *arg, me4000_ao_context_t * ao_context)
3012 return 0; 2994 return 0;
3013} 2995}
3014 2996
3015/*------------------------------- Analog input stuff --------------------------------------*/ 2997/*------------------------- Analog input stuff -------------------------------*/
3016 2998
3017static int me4000_ai_prepare(me4000_ai_context_t * ai_context) 2999static int me4000_ai_prepare(struct me4000_ai_context *ai_context)
3018{ 3000{
3019 wait_queue_head_t queue; 3001 wait_queue_head_t queue;
3020 int err; 3002 int err;
@@ -3057,14 +3039,13 @@ static int me4000_ai_prepare(me4000_ai_context_t * ai_context)
3057 3039
3058 /* Allocate circular buffer */ 3040 /* Allocate circular buffer */
3059 ai_context->circ_buf.buf = 3041 ai_context->circ_buf.buf =
3060 kmalloc(ME4000_AI_BUFFER_SIZE, GFP_KERNEL); 3042 kzalloc(ME4000_AI_BUFFER_SIZE, GFP_KERNEL);
3061 if (!ai_context->circ_buf.buf) { 3043 if (!ai_context->circ_buf.buf) {
3062 printk(KERN_ERR 3044 printk(KERN_ERR
3063 "ME4000:me4000_ai_prepare():Can't get circular buffer\n"); 3045 "ME4000:me4000_ai_prepare():Can't get circular buffer\n");
3064 free_irq(ai_context->irq, ai_context); 3046 free_irq(ai_context->irq, ai_context);
3065 return -ENOMEM; 3047 return -ENOMEM;
3066 } 3048 }
3067 memset(ai_context->circ_buf.buf, 0, ME4000_AI_BUFFER_SIZE);
3068 3049
3069 /* Clear the circular buffer */ 3050 /* Clear the circular buffer */
3070 ai_context->circ_buf.head = 0; 3051 ai_context->circ_buf.head = 0;
@@ -3074,7 +3055,7 @@ static int me4000_ai_prepare(me4000_ai_context_t * ai_context)
3074 return 0; 3055 return 0;
3075} 3056}
3076 3057
3077static int me4000_ai_reset(me4000_ai_context_t * ai_context) 3058static int me4000_ai_reset(struct me4000_ai_context *ai_context)
3078{ 3059{
3079 wait_queue_head_t queue; 3060 wait_queue_head_t queue;
3080 u32 tmp; 3061 u32 tmp;
@@ -3139,7 +3120,7 @@ static int me4000_ai_reset(me4000_ai_context_t * ai_context)
3139static int me4000_ai_ioctl_sing(struct inode *inode_p, struct file *file_p, 3120static int me4000_ai_ioctl_sing(struct inode *inode_p, struct file *file_p,
3140 unsigned int service, unsigned long arg) 3121 unsigned int service, unsigned long arg)
3141{ 3122{
3142 me4000_ai_context_t *ai_context; 3123 struct me4000_ai_context *ai_context;
3143 3124
3144 CALL_PDEBUG("me4000_ai_ioctl_sing() is executed\n"); 3125 CALL_PDEBUG("me4000_ai_ioctl_sing() is executed\n");
3145 3126
@@ -3157,16 +3138,17 @@ static int me4000_ai_ioctl_sing(struct inode *inode_p, struct file *file_p,
3157 3138
3158 switch (service) { 3139 switch (service) {
3159 case ME4000_AI_SINGLE: 3140 case ME4000_AI_SINGLE:
3160 return me4000_ai_single((me4000_ai_single_t *) arg, ai_context); 3141 return me4000_ai_single((struct me4000_ai_single *)arg,
3142 ai_context);
3161 case ME4000_AI_EX_TRIG_ENABLE: 3143 case ME4000_AI_EX_TRIG_ENABLE:
3162 return me4000_ai_ex_trig_enable(ai_context); 3144 return me4000_ai_ex_trig_enable(ai_context);
3163 case ME4000_AI_EX_TRIG_DISABLE: 3145 case ME4000_AI_EX_TRIG_DISABLE:
3164 return me4000_ai_ex_trig_disable(ai_context); 3146 return me4000_ai_ex_trig_disable(ai_context);
3165 case ME4000_AI_EX_TRIG_SETUP: 3147 case ME4000_AI_EX_TRIG_SETUP:
3166 return me4000_ai_ex_trig_setup((me4000_ai_trigger_t *) arg, 3148 return me4000_ai_ex_trig_setup((struct me4000_ai_trigger *)arg,
3167 ai_context); 3149 ai_context);
3168 case ME4000_GET_USER_INFO: 3150 case ME4000_GET_USER_INFO:
3169 return me4000_get_user_info((me4000_user_info_t *) arg, 3151 return me4000_get_user_info((struct me4000_user_info *)arg,
3170 ai_context->board_info); 3152 ai_context->board_info);
3171 case ME4000_AI_OFFSET_ENABLE: 3153 case ME4000_AI_OFFSET_ENABLE:
3172 return me4000_ai_offset_enable(ai_context); 3154 return me4000_ai_offset_enable(ai_context);
@@ -3177,9 +3159,11 @@ static int me4000_ai_ioctl_sing(struct inode *inode_p, struct file *file_p,
3177 case ME4000_AI_FULLSCALE_DISABLE: 3159 case ME4000_AI_FULLSCALE_DISABLE:
3178 return me4000_ai_fullscale_disable(ai_context); 3160 return me4000_ai_fullscale_disable(ai_context);
3179 case ME4000_AI_EEPROM_READ: 3161 case ME4000_AI_EEPROM_READ:
3180 return me4000_eeprom_read((me4000_eeprom_t *) arg, ai_context); 3162 return me4000_eeprom_read((struct me4000_eeprom *)arg,
3163 ai_context);
3181 case ME4000_AI_EEPROM_WRITE: 3164 case ME4000_AI_EEPROM_WRITE:
3182 return me4000_eeprom_write((me4000_eeprom_t *) arg, ai_context); 3165 return me4000_eeprom_write((struct me4000_eeprom *)arg,
3166 ai_context);
3183 default: 3167 default:
3184 printk(KERN_ERR 3168 printk(KERN_ERR
3185 "me4000_ai_ioctl_sing():Invalid service number\n"); 3169 "me4000_ai_ioctl_sing():Invalid service number\n");
@@ -3188,10 +3172,10 @@ static int me4000_ai_ioctl_sing(struct inode *inode_p, struct file *file_p,
3188 return 0; 3172 return 0;
3189} 3173}
3190 3174
3191static int me4000_ai_single(me4000_ai_single_t * arg, 3175static int me4000_ai_single(struct me4000_ai_single *arg,
3192 me4000_ai_context_t * ai_context) 3176 struct me4000_ai_context *ai_context)
3193{ 3177{
3194 me4000_ai_single_t cmd; 3178 struct me4000_ai_single cmd;
3195 int err; 3179 int err;
3196 u32 tmp; 3180 u32 tmp;
3197 wait_queue_head_t queue; 3181 wait_queue_head_t queue;
@@ -3202,7 +3186,7 @@ static int me4000_ai_single(me4000_ai_single_t * arg,
3202 init_waitqueue_head(&queue); 3186 init_waitqueue_head(&queue);
3203 3187
3204 /* Copy data from user */ 3188 /* Copy data from user */
3205 err = copy_from_user(&cmd, arg, sizeof(me4000_ai_single_t)); 3189 err = copy_from_user(&cmd, arg, sizeof(struct me4000_ai_single));
3206 if (err) { 3190 if (err) {
3207 printk(KERN_ERR 3191 printk(KERN_ERR
3208 "ME4000:me4000_ai_single():Can't copy from user space\n"); 3192 "ME4000:me4000_ai_single():Can't copy from user space\n");
@@ -3301,7 +3285,7 @@ static int me4000_ai_single(me4000_ai_single_t * arg,
3301 cmd.value = me4000_inl(ai_context->data_reg) & 0xFFFF; 3285 cmd.value = me4000_inl(ai_context->data_reg) & 0xFFFF;
3302 3286
3303 /* Copy result back to user */ 3287 /* Copy result back to user */
3304 err = copy_to_user(arg, &cmd, sizeof(me4000_ai_single_t)); 3288 err = copy_to_user(arg, &cmd, sizeof(struct me4000_ai_single));
3305 if (err) { 3289 if (err) {
3306 printk(KERN_ERR 3290 printk(KERN_ERR
3307 "ME4000:me4000_ai_single():Can't copy to user space\n"); 3291 "ME4000:me4000_ai_single():Can't copy to user space\n");
@@ -3314,7 +3298,7 @@ static int me4000_ai_single(me4000_ai_single_t * arg,
3314static int me4000_ai_ioctl_sw(struct inode *inode_p, struct file *file_p, 3298static int me4000_ai_ioctl_sw(struct inode *inode_p, struct file *file_p,
3315 unsigned int service, unsigned long arg) 3299 unsigned int service, unsigned long arg)
3316{ 3300{
3317 me4000_ai_context_t *ai_context; 3301 struct me4000_ai_context *ai_context;
3318 3302
3319 CALL_PDEBUG("me4000_ai_ioctl_sw() is executed\n"); 3303 CALL_PDEBUG("me4000_ai_ioctl_sw() is executed\n");
3320 3304
@@ -3332,9 +3316,11 @@ static int me4000_ai_ioctl_sw(struct inode *inode_p, struct file *file_p,
3332 3316
3333 switch (service) { 3317 switch (service) {
3334 case ME4000_AI_SC_SETUP: 3318 case ME4000_AI_SC_SETUP:
3335 return me4000_ai_sc_setup((me4000_ai_sc_t *) arg, ai_context); 3319 return me4000_ai_sc_setup((struct me4000_ai_sc *)arg,
3320 ai_context);
3336 case ME4000_AI_CONFIG: 3321 case ME4000_AI_CONFIG:
3337 return me4000_ai_config((me4000_ai_config_t *) arg, ai_context); 3322 return me4000_ai_config((struct me4000_ai_config *)arg,
3323 ai_context);
3338 case ME4000_AI_START: 3324 case ME4000_AI_START:
3339 return me4000_ai_start(ai_context); 3325 return me4000_ai_start(ai_context);
3340 case ME4000_AI_STOP: 3326 case ME4000_AI_STOP:
@@ -3344,19 +3330,20 @@ static int me4000_ai_ioctl_sw(struct inode *inode_p, struct file *file_p,
3344 case ME4000_AI_FSM_STATE: 3330 case ME4000_AI_FSM_STATE:
3345 return me4000_ai_fsm_state((int *)arg, ai_context); 3331 return me4000_ai_fsm_state((int *)arg, ai_context);
3346 case ME4000_GET_USER_INFO: 3332 case ME4000_GET_USER_INFO:
3347 return me4000_get_user_info((me4000_user_info_t *) arg, 3333 return me4000_get_user_info((struct me4000_user_info *)arg,
3348 ai_context->board_info); 3334 ai_context->board_info);
3349 case ME4000_AI_EEPROM_READ: 3335 case ME4000_AI_EEPROM_READ:
3350 return me4000_eeprom_read((me4000_eeprom_t *) arg, ai_context); 3336 return me4000_eeprom_read((struct me4000_eeprom *)arg,
3337 ai_context);
3351 case ME4000_AI_EEPROM_WRITE: 3338 case ME4000_AI_EEPROM_WRITE:
3352 return me4000_eeprom_write((me4000_eeprom_t *) arg, ai_context); 3339 return me4000_eeprom_write((struct me4000_eeprom *)arg,
3340 ai_context);
3353 case ME4000_AI_GET_COUNT_BUFFER: 3341 case ME4000_AI_GET_COUNT_BUFFER:
3354 return me4000_ai_get_count_buffer((unsigned long *)arg, 3342 return me4000_ai_get_count_buffer((unsigned long *)arg,
3355 ai_context); 3343 ai_context);
3356 default: 3344 default:
3357 printk(KERN_ERR 3345 printk(KERN_ERR
3358 "ME4000:me4000_ai_ioctl_sw():Invalid service number %d\n", 3346 "%s:Invalid service number %d\n", __func__, service);
3359 service);
3360 return -ENOTTY; 3347 return -ENOTTY;
3361 } 3348 }
3362 return 0; 3349 return 0;
@@ -3365,7 +3352,7 @@ static int me4000_ai_ioctl_sw(struct inode *inode_p, struct file *file_p,
3365static int me4000_ai_ioctl_ext(struct inode *inode_p, struct file *file_p, 3352static int me4000_ai_ioctl_ext(struct inode *inode_p, struct file *file_p,
3366 unsigned int service, unsigned long arg) 3353 unsigned int service, unsigned long arg)
3367{ 3354{
3368 me4000_ai_context_t *ai_context; 3355 struct me4000_ai_context *ai_context;
3369 3356
3370 CALL_PDEBUG("me4000_ai_ioctl_ext() is executed\n"); 3357 CALL_PDEBUG("me4000_ai_ioctl_ext() is executed\n");
3371 3358
@@ -3383,9 +3370,11 @@ static int me4000_ai_ioctl_ext(struct inode *inode_p, struct file *file_p,
3383 3370
3384 switch (service) { 3371 switch (service) {
3385 case ME4000_AI_SC_SETUP: 3372 case ME4000_AI_SC_SETUP:
3386 return me4000_ai_sc_setup((me4000_ai_sc_t *) arg, ai_context); 3373 return me4000_ai_sc_setup((struct me4000_ai_sc *)arg,
3374 ai_context);
3387 case ME4000_AI_CONFIG: 3375 case ME4000_AI_CONFIG:
3388 return me4000_ai_config((me4000_ai_config_t *) arg, ai_context); 3376 return me4000_ai_config((struct me4000_ai_config *)arg,
3377 ai_context);
3389 case ME4000_AI_START: 3378 case ME4000_AI_START:
3390 return me4000_ai_start_ex((unsigned long *)arg, ai_context); 3379 return me4000_ai_start_ex((unsigned long *)arg, ai_context);
3391 case ME4000_AI_STOP: 3380 case ME4000_AI_STOP:
@@ -3397,20 +3386,19 @@ static int me4000_ai_ioctl_ext(struct inode *inode_p, struct file *file_p,
3397 case ME4000_AI_EX_TRIG_DISABLE: 3386 case ME4000_AI_EX_TRIG_DISABLE:
3398 return me4000_ai_ex_trig_disable(ai_context); 3387 return me4000_ai_ex_trig_disable(ai_context);
3399 case ME4000_AI_EX_TRIG_SETUP: 3388 case ME4000_AI_EX_TRIG_SETUP:
3400 return me4000_ai_ex_trig_setup((me4000_ai_trigger_t *) arg, 3389 return me4000_ai_ex_trig_setup((struct me4000_ai_trigger *)arg,
3401 ai_context); 3390 ai_context);
3402 case ME4000_AI_FSM_STATE: 3391 case ME4000_AI_FSM_STATE:
3403 return me4000_ai_fsm_state((int *)arg, ai_context); 3392 return me4000_ai_fsm_state((int *)arg, ai_context);
3404 case ME4000_GET_USER_INFO: 3393 case ME4000_GET_USER_INFO:
3405 return me4000_get_user_info((me4000_user_info_t *) arg, 3394 return me4000_get_user_info((struct me4000_user_info *)arg,
3406 ai_context->board_info); 3395 ai_context->board_info);
3407 case ME4000_AI_GET_COUNT_BUFFER: 3396 case ME4000_AI_GET_COUNT_BUFFER:
3408 return me4000_ai_get_count_buffer((unsigned long *)arg, 3397 return me4000_ai_get_count_buffer((unsigned long *)arg,
3409 ai_context); 3398 ai_context);
3410 default: 3399 default:
3411 printk(KERN_ERR 3400 printk(KERN_ERR
3412 "ME4000:me4000_ai_ioctl_ext():Invalid service number %d\n", 3401 "%s:Invalid service number %d\n", __func__ , service);
3413 service);
3414 return -ENOTTY; 3402 return -ENOTTY;
3415 } 3403 }
3416 return 0; 3404 return 0;
@@ -3418,7 +3406,7 @@ static int me4000_ai_ioctl_ext(struct inode *inode_p, struct file *file_p,
3418 3406
3419static int me4000_ai_fasync(int fd, struct file *file_p, int mode) 3407static int me4000_ai_fasync(int fd, struct file *file_p, int mode)
3420{ 3408{
3421 me4000_ai_context_t *ai_context; 3409 struct me4000_ai_context *ai_context;
3422 3410
3423 CALL_PDEBUG("me4000_ao_fasync_cont() is executed\n"); 3411 CALL_PDEBUG("me4000_ao_fasync_cont() is executed\n");
3424 3412
@@ -3426,10 +3414,10 @@ static int me4000_ai_fasync(int fd, struct file *file_p, int mode)
3426 return fasync_helper(fd, file_p, mode, &ai_context->fasync_p); 3414 return fasync_helper(fd, file_p, mode, &ai_context->fasync_p);
3427} 3415}
3428 3416
3429static int me4000_ai_config(me4000_ai_config_t * arg, 3417static int me4000_ai_config(struct me4000_ai_config *arg,
3430 me4000_ai_context_t * ai_context) 3418 struct me4000_ai_context *ai_context)
3431{ 3419{
3432 me4000_ai_config_t cmd; 3420 struct me4000_ai_config cmd;
3433 u32 *list = NULL; 3421 u32 *list = NULL;
3434 u32 mode; 3422 u32 mode;
3435 int i; 3423 int i;
@@ -3451,7 +3439,7 @@ static int me4000_ai_config(me4000_ai_config_t * arg,
3451 } 3439 }
3452 3440
3453 /* Copy data from user */ 3441 /* Copy data from user */
3454 err = copy_from_user(&cmd, arg, sizeof(me4000_ai_config_t)); 3442 err = copy_from_user(&cmd, arg, sizeof(struct me4000_ai_config));
3455 if (err) { 3443 if (err) {
3456 printk(KERN_ERR 3444 printk(KERN_ERR
3457 "ME4000:me4000_ai_config():Can't copy from user space\n"); 3445 "ME4000:me4000_ai_config():Can't copy from user space\n");
@@ -3671,7 +3659,7 @@ static int me4000_ai_config(me4000_ai_config_t * arg,
3671 3659
3672 return 0; 3660 return 0;
3673 3661
3674 AI_CONFIG_ERR: 3662AI_CONFIG_ERR:
3675 3663
3676 /* Reset the timers */ 3664 /* Reset the timers */
3677 ai_context->chan_timer = 66; 3665 ai_context->chan_timer = 66;
@@ -3699,7 +3687,7 @@ static int me4000_ai_config(me4000_ai_config_t * arg,
3699 3687
3700} 3688}
3701 3689
3702static int ai_common_start(me4000_ai_context_t * ai_context) 3690static int ai_common_start(struct me4000_ai_context *ai_context)
3703{ 3691{
3704 u32 tmp; 3692 u32 tmp;
3705 CALL_PDEBUG("ai_common_start() is executed\n"); 3693 CALL_PDEBUG("ai_common_start() is executed\n");
@@ -3762,7 +3750,7 @@ static int ai_common_start(me4000_ai_context_t * ai_context)
3762 return 0; 3750 return 0;
3763} 3751}
3764 3752
3765static int me4000_ai_start(me4000_ai_context_t * ai_context) 3753static int me4000_ai_start(struct me4000_ai_context *ai_context)
3766{ 3754{
3767 int err; 3755 int err;
3768 CALL_PDEBUG("me4000_ai_start() is executed\n"); 3756 CALL_PDEBUG("me4000_ai_start() is executed\n");
@@ -3779,7 +3767,7 @@ static int me4000_ai_start(me4000_ai_context_t * ai_context)
3779} 3767}
3780 3768
3781static int me4000_ai_start_ex(unsigned long *arg, 3769static int me4000_ai_start_ex(unsigned long *arg,
3782 me4000_ai_context_t * ai_context) 3770 struct me4000_ai_context *ai_context)
3783{ 3771{
3784 int err; 3772 int err;
3785 wait_queue_head_t queue; 3773 wait_queue_head_t queue;
@@ -3834,7 +3822,7 @@ static int me4000_ai_start_ex(unsigned long *arg,
3834 return 0; 3822 return 0;
3835} 3823}
3836 3824
3837static int me4000_ai_stop(me4000_ai_context_t * ai_context) 3825static int me4000_ai_stop(struct me4000_ai_context *ai_context)
3838{ 3826{
3839 wait_queue_head_t queue; 3827 wait_queue_head_t queue;
3840 u32 tmp; 3828 u32 tmp;
@@ -3871,7 +3859,7 @@ static int me4000_ai_stop(me4000_ai_context_t * ai_context)
3871 return 0; 3859 return 0;
3872} 3860}
3873 3861
3874static int me4000_ai_immediate_stop(me4000_ai_context_t * ai_context) 3862static int me4000_ai_immediate_stop(struct me4000_ai_context *ai_context)
3875{ 3863{
3876 wait_queue_head_t queue; 3864 wait_queue_head_t queue;
3877 u32 tmp; 3865 u32 tmp;
@@ -3908,7 +3896,7 @@ static int me4000_ai_immediate_stop(me4000_ai_context_t * ai_context)
3908 return 0; 3896 return 0;
3909} 3897}
3910 3898
3911static int me4000_ai_ex_trig_enable(me4000_ai_context_t * ai_context) 3899static int me4000_ai_ex_trig_enable(struct me4000_ai_context *ai_context)
3912{ 3900{
3913 u32 tmp; 3901 u32 tmp;
3914 unsigned long flags; 3902 unsigned long flags;
@@ -3924,7 +3912,7 @@ static int me4000_ai_ex_trig_enable(me4000_ai_context_t * ai_context)
3924 return 0; 3912 return 0;
3925} 3913}
3926 3914
3927static int me4000_ai_ex_trig_disable(me4000_ai_context_t * ai_context) 3915static int me4000_ai_ex_trig_disable(struct me4000_ai_context *ai_context)
3928{ 3916{
3929 u32 tmp; 3917 u32 tmp;
3930 unsigned long flags; 3918 unsigned long flags;
@@ -3940,10 +3928,10 @@ static int me4000_ai_ex_trig_disable(me4000_ai_context_t * ai_context)
3940 return 0; 3928 return 0;
3941} 3929}
3942 3930
3943static int me4000_ai_ex_trig_setup(me4000_ai_trigger_t * arg, 3931static int me4000_ai_ex_trig_setup(struct me4000_ai_trigger *arg,
3944 me4000_ai_context_t * ai_context) 3932 struct me4000_ai_context *ai_context)
3945{ 3933{
3946 me4000_ai_trigger_t cmd; 3934 struct me4000_ai_trigger cmd;
3947 int err; 3935 int err;
3948 u32 tmp; 3936 u32 tmp;
3949 unsigned long flags; 3937 unsigned long flags;
@@ -3951,7 +3939,7 @@ static int me4000_ai_ex_trig_setup(me4000_ai_trigger_t * arg,
3951 CALL_PDEBUG("me4000_ai_ex_trig_setup() is executed\n"); 3939 CALL_PDEBUG("me4000_ai_ex_trig_setup() is executed\n");
3952 3940
3953 /* Copy data from user */ 3941 /* Copy data from user */
3954 err = copy_from_user(&cmd, arg, sizeof(me4000_ai_trigger_t)); 3942 err = copy_from_user(&cmd, arg, sizeof(struct me4000_ai_trigger));
3955 if (err) { 3943 if (err) {
3956 printk(KERN_ERR 3944 printk(KERN_ERR
3957 "ME4000:me4000_ai_ex_trig_setup():Can't copy from user space\n"); 3945 "ME4000:me4000_ai_ex_trig_setup():Can't copy from user space\n");
@@ -4000,16 +3988,16 @@ static int me4000_ai_ex_trig_setup(me4000_ai_trigger_t * arg,
4000 return 0; 3988 return 0;
4001} 3989}
4002 3990
4003static int me4000_ai_sc_setup(me4000_ai_sc_t * arg, 3991static int me4000_ai_sc_setup(struct me4000_ai_sc *arg,
4004 me4000_ai_context_t * ai_context) 3992 struct me4000_ai_context *ai_context)
4005{ 3993{
4006 me4000_ai_sc_t cmd; 3994 struct me4000_ai_sc cmd;
4007 int err; 3995 int err;
4008 3996
4009 CALL_PDEBUG("me4000_ai_sc_setup() is executed\n"); 3997 CALL_PDEBUG("me4000_ai_sc_setup() is executed\n");
4010 3998
4011 /* Copy data from user */ 3999 /* Copy data from user */
4012 err = copy_from_user(&cmd, arg, sizeof(me4000_ai_sc_t)); 4000 err = copy_from_user(&cmd, arg, sizeof(struct me4000_ai_sc));
4013 if (err) { 4001 if (err) {
4014 printk(KERN_ERR 4002 printk(KERN_ERR
4015 "ME4000:me4000_ai_sc_setup():Can't copy from user space\n"); 4003 "ME4000:me4000_ai_sc_setup():Can't copy from user space\n");
@@ -4023,9 +4011,9 @@ static int me4000_ai_sc_setup(me4000_ai_sc_t * arg,
4023} 4011}
4024 4012
4025static ssize_t me4000_ai_read(struct file *filep, char *buff, size_t cnt, 4013static ssize_t me4000_ai_read(struct file *filep, char *buff, size_t cnt,
4026 loff_t * offp) 4014 loff_t *offp)
4027{ 4015{
4028 me4000_ai_context_t *ai_context = filep->private_data; 4016 struct me4000_ai_context *ai_context = filep->private_data;
4029 s16 *buffer = (s16 *) buff; 4017 s16 *buffer = (s16 *) buff;
4030 size_t count = cnt / 2; 4018 size_t count = cnt / 2;
4031 unsigned long flags; 4019 unsigned long flags;
@@ -4150,9 +4138,9 @@ static ssize_t me4000_ai_read(struct file *filep, char *buff, size_t cnt,
4150 return ret * 2; 4138 return ret * 2;
4151} 4139}
4152 4140
4153static unsigned int me4000_ai_poll(struct file *file_p, poll_table * wait) 4141static unsigned int me4000_ai_poll(struct file *file_p, poll_table *wait)
4154{ 4142{
4155 me4000_ai_context_t *ai_context; 4143 struct me4000_ai_context *ai_context;
4156 unsigned long mask = 0; 4144 unsigned long mask = 0;
4157 4145
4158 CALL_PDEBUG("me4000_ai_poll() is executed\n"); 4146 CALL_PDEBUG("me4000_ai_poll() is executed\n");
@@ -4171,7 +4159,7 @@ static unsigned int me4000_ai_poll(struct file *file_p, poll_table * wait)
4171 return mask; 4159 return mask;
4172} 4160}
4173 4161
4174static int me4000_ai_offset_enable(me4000_ai_context_t * ai_context) 4162static int me4000_ai_offset_enable(struct me4000_ai_context *ai_context)
4175{ 4163{
4176 unsigned long tmp; 4164 unsigned long tmp;
4177 4165
@@ -4184,7 +4172,7 @@ static int me4000_ai_offset_enable(me4000_ai_context_t * ai_context)
4184 return 0; 4172 return 0;
4185} 4173}
4186 4174
4187static int me4000_ai_offset_disable(me4000_ai_context_t * ai_context) 4175static int me4000_ai_offset_disable(struct me4000_ai_context *ai_context)
4188{ 4176{
4189 unsigned long tmp; 4177 unsigned long tmp;
4190 4178
@@ -4197,7 +4185,7 @@ static int me4000_ai_offset_disable(me4000_ai_context_t * ai_context)
4197 return 0; 4185 return 0;
4198} 4186}
4199 4187
4200static int me4000_ai_fullscale_enable(me4000_ai_context_t * ai_context) 4188static int me4000_ai_fullscale_enable(struct me4000_ai_context *ai_context)
4201{ 4189{
4202 unsigned long tmp; 4190 unsigned long tmp;
4203 4191
@@ -4210,7 +4198,7 @@ static int me4000_ai_fullscale_enable(me4000_ai_context_t * ai_context)
4210 return 0; 4198 return 0;
4211} 4199}
4212 4200
4213static int me4000_ai_fullscale_disable(me4000_ai_context_t * ai_context) 4201static int me4000_ai_fullscale_disable(struct me4000_ai_context *ai_context)
4214{ 4202{
4215 unsigned long tmp; 4203 unsigned long tmp;
4216 4204
@@ -4223,7 +4211,7 @@ static int me4000_ai_fullscale_disable(me4000_ai_context_t * ai_context)
4223 return 0; 4211 return 0;
4224} 4212}
4225 4213
4226static int me4000_ai_fsm_state(int *arg, me4000_ai_context_t * ai_context) 4214static int me4000_ai_fsm_state(int *arg, struct me4000_ai_context *ai_context)
4227{ 4215{
4228 unsigned long tmp; 4216 unsigned long tmp;
4229 4217
@@ -4242,7 +4230,7 @@ static int me4000_ai_fsm_state(int *arg, me4000_ai_context_t * ai_context)
4242} 4230}
4243 4231
4244static int me4000_ai_get_count_buffer(unsigned long *arg, 4232static int me4000_ai_get_count_buffer(unsigned long *arg,
4245 me4000_ai_context_t * ai_context) 4233 struct me4000_ai_context *ai_context)
4246{ 4234{
4247 unsigned long c; 4235 unsigned long c;
4248 int err; 4236 int err;
@@ -4252,7 +4240,7 @@ static int me4000_ai_get_count_buffer(unsigned long *arg,
4252 err = copy_to_user(arg, &c, sizeof(unsigned long)); 4240 err = copy_to_user(arg, &c, sizeof(unsigned long));
4253 if (err) { 4241 if (err) {
4254 printk(KERN_ERR 4242 printk(KERN_ERR
4255 "ME4000:me4000_ai_get_count_buffer():Can't copy to user space\n"); 4243 "%s:Can't copy to user space\n", __func__);
4256 return -EFAULT; 4244 return -EFAULT;
4257 } 4245 }
4258 4246
@@ -4261,7 +4249,7 @@ static int me4000_ai_get_count_buffer(unsigned long *arg,
4261 4249
4262/*---------------------------------- EEPROM stuff ---------------------------*/ 4250/*---------------------------------- EEPROM stuff ---------------------------*/
4263 4251
4264static int eeprom_write_cmd(me4000_ai_context_t * ai_context, unsigned long cmd, 4252static int eeprom_write_cmd(struct me4000_ai_context *ai_context, unsigned long cmd,
4265 int length) 4253 int length)
4266{ 4254{
4267 int i; 4255 int i;
@@ -4318,7 +4306,7 @@ static int eeprom_write_cmd(me4000_ai_context_t * ai_context, unsigned long cmd,
4318 return 0; 4306 return 0;
4319} 4307}
4320 4308
4321static unsigned short eeprom_read_cmd(me4000_ai_context_t * ai_context, 4309static unsigned short eeprom_read_cmd(struct me4000_ai_context *ai_context,
4322 unsigned long cmd, int length) 4310 unsigned long cmd, int length)
4323{ 4311{
4324 int i; 4312 int i;
@@ -4397,11 +4385,11 @@ static unsigned short eeprom_read_cmd(me4000_ai_context_t * ai_context,
4397 return id; 4385 return id;
4398} 4386}
4399 4387
4400static int me4000_eeprom_write(me4000_eeprom_t * arg, 4388static int me4000_eeprom_write(struct me4000_eeprom *arg,
4401 me4000_ai_context_t * ai_context) 4389 struct me4000_ai_context *ai_context)
4402{ 4390{
4403 int err; 4391 int err;
4404 me4000_eeprom_t setup; 4392 struct me4000_eeprom setup;
4405 unsigned long cmd; 4393 unsigned long cmd;
4406 unsigned long date_high; 4394 unsigned long date_high;
4407 unsigned long date_low; 4395 unsigned long date_low;
@@ -4594,12 +4582,12 @@ static int me4000_eeprom_write(me4000_eeprom_t * arg,
4594 return 0; 4582 return 0;
4595} 4583}
4596 4584
4597static int me4000_eeprom_read(me4000_eeprom_t * arg, 4585static int me4000_eeprom_read(struct me4000_eeprom *arg,
4598 me4000_ai_context_t * ai_context) 4586 struct me4000_ai_context *ai_context)
4599{ 4587{
4600 int err; 4588 int err;
4601 unsigned long cmd; 4589 unsigned long cmd;
4602 me4000_eeprom_t setup; 4590 struct me4000_eeprom setup;
4603 4591
4604 CALL_PDEBUG("me4000_eeprom_read() is executed\n"); 4592 CALL_PDEBUG("me4000_eeprom_read() is executed\n");
4605 4593
@@ -4687,7 +4675,7 @@ static int me4000_eeprom_read(me4000_eeprom_t * arg,
4687static int me4000_dio_ioctl(struct inode *inode_p, struct file *file_p, 4675static int me4000_dio_ioctl(struct inode *inode_p, struct file *file_p,
4688 unsigned int service, unsigned long arg) 4676 unsigned int service, unsigned long arg)
4689{ 4677{
4690 me4000_dio_context_t *dio_context; 4678 struct me4000_dio_context *dio_context;
4691 4679
4692 CALL_PDEBUG("me4000_dio_ioctl() is executed\n"); 4680 CALL_PDEBUG("me4000_dio_ioctl() is executed\n");
4693 4681
@@ -4704,13 +4692,13 @@ static int me4000_dio_ioctl(struct inode *inode_p, struct file *file_p,
4704 4692
4705 switch (service) { 4693 switch (service) {
4706 case ME4000_DIO_CONFIG: 4694 case ME4000_DIO_CONFIG:
4707 return me4000_dio_config((me4000_dio_config_t *) arg, 4695 return me4000_dio_config((struct me4000_dio_config *)arg,
4708 dio_context); 4696 dio_context);
4709 case ME4000_DIO_SET_BYTE: 4697 case ME4000_DIO_SET_BYTE:
4710 return me4000_dio_set_byte((me4000_dio_byte_t *) arg, 4698 return me4000_dio_set_byte((struct me4000_dio_byte *)arg,
4711 dio_context); 4699 dio_context);
4712 case ME4000_DIO_GET_BYTE: 4700 case ME4000_DIO_GET_BYTE:
4713 return me4000_dio_get_byte((me4000_dio_byte_t *) arg, 4701 return me4000_dio_get_byte((struct me4000_dio_byte *)arg,
4714 dio_context); 4702 dio_context);
4715 case ME4000_DIO_RESET: 4703 case ME4000_DIO_RESET:
4716 return me4000_dio_reset(dio_context); 4704 return me4000_dio_reset(dio_context);
@@ -4723,17 +4711,17 @@ static int me4000_dio_ioctl(struct inode *inode_p, struct file *file_p,
4723 return 0; 4711 return 0;
4724} 4712}
4725 4713
4726static int me4000_dio_config(me4000_dio_config_t * arg, 4714static int me4000_dio_config(struct me4000_dio_config *arg,
4727 me4000_dio_context_t * dio_context) 4715 struct me4000_dio_context *dio_context)
4728{ 4716{
4729 me4000_dio_config_t cmd; 4717 struct me4000_dio_config cmd;
4730 u32 tmp; 4718 u32 tmp;
4731 int err; 4719 int err;
4732 4720
4733 CALL_PDEBUG("me4000_dio_config() is executed\n"); 4721 CALL_PDEBUG("me4000_dio_config() is executed\n");
4734 4722
4735 /* Copy data from user */ 4723 /* Copy data from user */
4736 err = copy_from_user(&cmd, arg, sizeof(me4000_dio_config_t)); 4724 err = copy_from_user(&cmd, arg, sizeof(struct me4000_dio_config));
4737 if (err) { 4725 if (err) {
4738 printk(KERN_ERR 4726 printk(KERN_ERR
4739 "ME4000:me4000_dio_config():Can't copy from user space\n"); 4727 "ME4000:me4000_dio_config():Can't copy from user space\n");
@@ -4964,16 +4952,16 @@ static int me4000_dio_config(me4000_dio_config_t * arg,
4964 return 0; 4952 return 0;
4965} 4953}
4966 4954
4967static int me4000_dio_set_byte(me4000_dio_byte_t * arg, 4955static int me4000_dio_set_byte(struct me4000_dio_byte *arg,
4968 me4000_dio_context_t * dio_context) 4956 struct me4000_dio_context *dio_context)
4969{ 4957{
4970 me4000_dio_byte_t cmd; 4958 struct me4000_dio_byte cmd;
4971 int err; 4959 int err;
4972 4960
4973 CALL_PDEBUG("me4000_dio_set_byte() is executed\n"); 4961 CALL_PDEBUG("me4000_dio_set_byte() is executed\n");
4974 4962
4975 /* Copy data from user */ 4963 /* Copy data from user */
4976 err = copy_from_user(&cmd, arg, sizeof(me4000_dio_byte_t)); 4964 err = copy_from_user(&cmd, arg, sizeof(struct me4000_dio_byte));
4977 if (err) { 4965 if (err) {
4978 printk(KERN_ERR 4966 printk(KERN_ERR
4979 "ME4000:me4000_dio_set_byte():Can't copy from user space\n"); 4967 "ME4000:me4000_dio_set_byte():Can't copy from user space\n");
@@ -5030,16 +5018,16 @@ static int me4000_dio_set_byte(me4000_dio_byte_t * arg,
5030 return 0; 5018 return 0;
5031} 5019}
5032 5020
5033static int me4000_dio_get_byte(me4000_dio_byte_t * arg, 5021static int me4000_dio_get_byte(struct me4000_dio_byte *arg,
5034 me4000_dio_context_t * dio_context) 5022 struct me4000_dio_context *dio_context)
5035{ 5023{
5036 me4000_dio_byte_t cmd; 5024 struct me4000_dio_byte cmd;
5037 int err; 5025 int err;
5038 5026
5039 CALL_PDEBUG("me4000_dio_get_byte() is executed\n"); 5027 CALL_PDEBUG("me4000_dio_get_byte() is executed\n");
5040 5028
5041 /* Copy data from user */ 5029 /* Copy data from user */
5042 err = copy_from_user(&cmd, arg, sizeof(me4000_dio_byte_t)); 5030 err = copy_from_user(&cmd, arg, sizeof(struct me4000_dio_byte));
5043 if (err) { 5031 if (err) {
5044 printk(KERN_ERR 5032 printk(KERN_ERR
5045 "ME4000:me4000_dio_get_byte():Can't copy from user space\n"); 5033 "ME4000:me4000_dio_get_byte():Can't copy from user space\n");
@@ -5070,7 +5058,7 @@ static int me4000_dio_get_byte(me4000_dio_byte_t * arg,
5070 } 5058 }
5071 5059
5072 /* Copy result back to user */ 5060 /* Copy result back to user */
5073 err = copy_to_user(arg, &cmd, sizeof(me4000_dio_byte_t)); 5061 err = copy_to_user(arg, &cmd, sizeof(struct me4000_dio_byte));
5074 if (err) { 5062 if (err) {
5075 printk(KERN_ERR 5063 printk(KERN_ERR
5076 "ME4000:me4000_dio_get_byte():Can't copy to user space\n"); 5064 "ME4000:me4000_dio_get_byte():Can't copy to user space\n");
@@ -5080,7 +5068,7 @@ static int me4000_dio_get_byte(me4000_dio_byte_t * arg,
5080 return 0; 5068 return 0;
5081} 5069}
5082 5070
5083static int me4000_dio_reset(me4000_dio_context_t * dio_context) 5071static int me4000_dio_reset(struct me4000_dio_context *dio_context)
5084{ 5072{
5085 CALL_PDEBUG("me4000_dio_reset() is executed\n"); 5073 CALL_PDEBUG("me4000_dio_reset() is executed\n");
5086 5074
@@ -5101,7 +5089,7 @@ static int me4000_dio_reset(me4000_dio_context_t * dio_context)
5101static int me4000_cnt_ioctl(struct inode *inode_p, struct file *file_p, 5089static int me4000_cnt_ioctl(struct inode *inode_p, struct file *file_p,
5102 unsigned int service, unsigned long arg) 5090 unsigned int service, unsigned long arg)
5103{ 5091{
5104 me4000_cnt_context_t *cnt_context; 5092 struct me4000_cnt_context *cnt_context;
5105 5093
5106 CALL_PDEBUG("me4000_cnt_ioctl() is executed\n"); 5094 CALL_PDEBUG("me4000_cnt_ioctl() is executed\n");
5107 5095
@@ -5118,11 +5106,11 @@ static int me4000_cnt_ioctl(struct inode *inode_p, struct file *file_p,
5118 5106
5119 switch (service) { 5107 switch (service) {
5120 case ME4000_CNT_READ: 5108 case ME4000_CNT_READ:
5121 return me4000_cnt_read((me4000_cnt_t *) arg, cnt_context); 5109 return me4000_cnt_read((struct me4000_cnt *)arg, cnt_context);
5122 case ME4000_CNT_WRITE: 5110 case ME4000_CNT_WRITE:
5123 return me4000_cnt_write((me4000_cnt_t *) arg, cnt_context); 5111 return me4000_cnt_write((struct me4000_cnt *)arg, cnt_context);
5124 case ME4000_CNT_CONFIG: 5112 case ME4000_CNT_CONFIG:
5125 return me4000_cnt_config((me4000_cnt_config_t *) arg, 5113 return me4000_cnt_config((struct me4000_cnt_config *)arg,
5126 cnt_context); 5114 cnt_context);
5127 case ME4000_CNT_RESET: 5115 case ME4000_CNT_RESET:
5128 return me4000_cnt_reset(cnt_context); 5116 return me4000_cnt_reset(cnt_context);
@@ -5135,10 +5123,10 @@ static int me4000_cnt_ioctl(struct inode *inode_p, struct file *file_p,
5135 return 0; 5123 return 0;
5136} 5124}
5137 5125
5138static int me4000_cnt_config(me4000_cnt_config_t * arg, 5126static int me4000_cnt_config(struct me4000_cnt_config *arg,
5139 me4000_cnt_context_t * cnt_context) 5127 struct me4000_cnt_context *cnt_context)
5140{ 5128{
5141 me4000_cnt_config_t cmd; 5129 struct me4000_cnt_config cmd;
5142 u8 counter; 5130 u8 counter;
5143 u8 mode; 5131 u8 mode;
5144 int err; 5132 int err;
@@ -5146,7 +5134,7 @@ static int me4000_cnt_config(me4000_cnt_config_t * arg,
5146 CALL_PDEBUG("me4000_cnt_config() is executed\n"); 5134 CALL_PDEBUG("me4000_cnt_config() is executed\n");
5147 5135
5148 /* Copy data from user */ 5136 /* Copy data from user */
5149 err = copy_from_user(&cmd, arg, sizeof(me4000_cnt_config_t)); 5137 err = copy_from_user(&cmd, arg, sizeof(struct me4000_cnt_config));
5150 if (err) { 5138 if (err) {
5151 printk(KERN_ERR 5139 printk(KERN_ERR
5152 "ME4000:me4000_cnt_config():Can't copy from user space\n"); 5140 "ME4000:me4000_cnt_config():Can't copy from user space\n");
@@ -5204,17 +5192,17 @@ static int me4000_cnt_config(me4000_cnt_config_t * arg,
5204 return 0; 5192 return 0;
5205} 5193}
5206 5194
5207static int me4000_cnt_read(me4000_cnt_t * arg, 5195static int me4000_cnt_read(struct me4000_cnt *arg,
5208 me4000_cnt_context_t * cnt_context) 5196 struct me4000_cnt_context *cnt_context)
5209{ 5197{
5210 me4000_cnt_t cmd; 5198 struct me4000_cnt cmd;
5211 u8 tmp; 5199 u8 tmp;
5212 int err; 5200 int err;
5213 5201
5214 CALL_PDEBUG("me4000_cnt_read() is executed\n"); 5202 CALL_PDEBUG("me4000_cnt_read() is executed\n");
5215 5203
5216 /* Copy data from user */ 5204 /* Copy data from user */
5217 err = copy_from_user(&cmd, arg, sizeof(me4000_cnt_t)); 5205 err = copy_from_user(&cmd, arg, sizeof(struct me4000_cnt));
5218 if (err) { 5206 if (err) {
5219 printk(KERN_ERR 5207 printk(KERN_ERR
5220 "ME4000:me4000_cnt_read():Can't copy from user space\n"); 5208 "ME4000:me4000_cnt_read():Can't copy from user space\n");
@@ -5249,7 +5237,7 @@ static int me4000_cnt_read(me4000_cnt_t * arg,
5249 } 5237 }
5250 5238
5251 /* Copy result back to user */ 5239 /* Copy result back to user */
5252 err = copy_to_user(arg, &cmd, sizeof(me4000_cnt_t)); 5240 err = copy_to_user(arg, &cmd, sizeof(struct me4000_cnt));
5253 if (err) { 5241 if (err) {
5254 printk(KERN_ERR 5242 printk(KERN_ERR
5255 "ME4000:me4000_cnt_read():Can't copy to user space\n"); 5243 "ME4000:me4000_cnt_read():Can't copy to user space\n");
@@ -5259,17 +5247,17 @@ static int me4000_cnt_read(me4000_cnt_t * arg,
5259 return 0; 5247 return 0;
5260} 5248}
5261 5249
5262static int me4000_cnt_write(me4000_cnt_t * arg, 5250static int me4000_cnt_write(struct me4000_cnt *arg,
5263 me4000_cnt_context_t * cnt_context) 5251 struct me4000_cnt_context *cnt_context)
5264{ 5252{
5265 me4000_cnt_t cmd; 5253 struct me4000_cnt cmd;
5266 u8 tmp; 5254 u8 tmp;
5267 int err; 5255 int err;
5268 5256
5269 CALL_PDEBUG("me4000_cnt_write() is executed\n"); 5257 CALL_PDEBUG("me4000_cnt_write() is executed\n");
5270 5258
5271 /* Copy data from user */ 5259 /* Copy data from user */
5272 err = copy_from_user(&cmd, arg, sizeof(me4000_cnt_t)); 5260 err = copy_from_user(&cmd, arg, sizeof(struct me4000_cnt));
5273 if (err) { 5261 if (err) {
5274 printk(KERN_ERR 5262 printk(KERN_ERR
5275 "ME4000:me4000_cnt_write():Can't copy from user space\n"); 5263 "ME4000:me4000_cnt_write():Can't copy from user space\n");
@@ -5306,7 +5294,7 @@ static int me4000_cnt_write(me4000_cnt_t * arg,
5306 return 0; 5294 return 0;
5307} 5295}
5308 5296
5309static int me4000_cnt_reset(me4000_cnt_context_t * cnt_context) 5297static int me4000_cnt_reset(struct me4000_cnt_context *cnt_context)
5310{ 5298{
5311 CALL_PDEBUG("me4000_cnt_reset() is executed\n"); 5299 CALL_PDEBUG("me4000_cnt_reset() is executed\n");
5312 5300
@@ -5333,7 +5321,7 @@ static int me4000_cnt_reset(me4000_cnt_context_t * cnt_context)
5333static int me4000_ext_int_ioctl(struct inode *inode_p, struct file *file_p, 5321static int me4000_ext_int_ioctl(struct inode *inode_p, struct file *file_p,
5334 unsigned int service, unsigned long arg) 5322 unsigned int service, unsigned long arg)
5335{ 5323{
5336 me4000_ext_int_context_t *ext_int_context; 5324 struct me4000_ext_int_context *ext_int_context;
5337 5325
5338 CALL_PDEBUG("me4000_ext_int_ioctl() is executed\n"); 5326 CALL_PDEBUG("me4000_ext_int_ioctl() is executed\n");
5339 5327
@@ -5366,7 +5354,7 @@ static int me4000_ext_int_ioctl(struct inode *inode_p, struct file *file_p,
5366 return 0; 5354 return 0;
5367} 5355}
5368 5356
5369static int me4000_ext_int_enable(me4000_ext_int_context_t * ext_int_context) 5357static int me4000_ext_int_enable(struct me4000_ext_int_context *ext_int_context)
5370{ 5358{
5371 unsigned long tmp; 5359 unsigned long tmp;
5372 5360
@@ -5379,7 +5367,7 @@ static int me4000_ext_int_enable(me4000_ext_int_context_t * ext_int_context)
5379 return 0; 5367 return 0;
5380} 5368}
5381 5369
5382static int me4000_ext_int_disable(me4000_ext_int_context_t * ext_int_context) 5370static int me4000_ext_int_disable(struct me4000_ext_int_context *ext_int_context)
5383{ 5371{
5384 unsigned long tmp; 5372 unsigned long tmp;
5385 5373
@@ -5393,7 +5381,7 @@ static int me4000_ext_int_disable(me4000_ext_int_context_t * ext_int_context)
5393} 5381}
5394 5382
5395static int me4000_ext_int_count(unsigned long *arg, 5383static int me4000_ext_int_count(unsigned long *arg,
5396 me4000_ext_int_context_t * ext_int_context) 5384 struct me4000_ext_int_context *ext_int_context)
5397{ 5385{
5398 5386
5399 CALL_PDEBUG("me4000_ext_int_count() is executed\n"); 5387 CALL_PDEBUG("me4000_ext_int_count() is executed\n");
@@ -5404,10 +5392,10 @@ static int me4000_ext_int_count(unsigned long *arg,
5404 5392
5405/*------------------------------------ General stuff ------------------------------------*/ 5393/*------------------------------------ General stuff ------------------------------------*/
5406 5394
5407static int me4000_get_user_info(me4000_user_info_t * arg, 5395static int me4000_get_user_info(struct me4000_user_info *arg,
5408 me4000_info_t * board_info) 5396 struct me4000_info *board_info)
5409{ 5397{
5410 me4000_user_info_t user_info; 5398 struct me4000_user_info user_info;
5411 5399
5412 CALL_PDEBUG("me4000_get_user_info() is executed\n"); 5400 CALL_PDEBUG("me4000_get_user_info() is executed\n");
5413 5401
@@ -5437,7 +5425,7 @@ static int me4000_get_user_info(me4000_user_info_t * arg,
5437 5425
5438 user_info.cnt_count = board_info->board_p->cnt.count; 5426 user_info.cnt_count = board_info->board_p->cnt.count;
5439 5427
5440 if (copy_to_user(arg, &user_info, sizeof(me4000_user_info_t))) 5428 if (copy_to_user(arg, &user_info, sizeof(struct me4000_user_info)))
5441 return -EFAULT; 5429 return -EFAULT;
5442 5430
5443 return 0; 5431 return 0;
@@ -5448,7 +5436,7 @@ static int me4000_get_user_info(me4000_user_info_t * arg,
5448static int me4000_ext_int_fasync(int fd, struct file *file_ptr, int mode) 5436static int me4000_ext_int_fasync(int fd, struct file *file_ptr, int mode)
5449{ 5437{
5450 int result = 0; 5438 int result = 0;
5451 me4000_ext_int_context_t *ext_int_context; 5439 struct me4000_ext_int_context *ext_int_context;
5452 5440
5453 CALL_PDEBUG("me4000_ext_int_fasync() is executed\n"); 5441 CALL_PDEBUG("me4000_ext_int_fasync() is executed\n");
5454 5442
@@ -5465,7 +5453,7 @@ static irqreturn_t me4000_ao_isr(int irq, void *dev_id)
5465{ 5453{
5466 u32 tmp; 5454 u32 tmp;
5467 u32 value; 5455 u32 value;
5468 me4000_ao_context_t *ao_context; 5456 struct me4000_ao_context *ao_context;
5469 int i; 5457 int i;
5470 int c = 0; 5458 int c = 0;
5471 int c1 = 0; 5459 int c1 = 0;
@@ -5589,7 +5577,7 @@ static irqreturn_t me4000_ao_isr(int irq, void *dev_id)
5589static irqreturn_t me4000_ai_isr(int irq, void *dev_id) 5577static irqreturn_t me4000_ai_isr(int irq, void *dev_id)
5590{ 5578{
5591 u32 tmp; 5579 u32 tmp;
5592 me4000_ai_context_t *ai_context; 5580 struct me4000_ai_context *ai_context;
5593 int i; 5581 int i;
5594 int c = 0; 5582 int c = 0;
5595 int c1 = 0; 5583 int c1 = 0;
@@ -5933,7 +5921,7 @@ static irqreturn_t me4000_ai_isr(int irq, void *dev_id)
5933 5921
5934static irqreturn_t me4000_ext_int_isr(int irq, void *dev_id) 5922static irqreturn_t me4000_ext_int_isr(int irq, void *dev_id)
5935{ 5923{
5936 me4000_ext_int_context_t *ext_int_context; 5924 struct me4000_ext_int_context *ext_int_context;
5937 unsigned long tmp; 5925 unsigned long tmp;
5938 5926
5939 ISR_PDEBUG("me4000_ext_int_isr() is executed\n"); 5927 ISR_PDEBUG("me4000_ext_int_isr() is executed\n");
@@ -5969,10 +5957,10 @@ static irqreturn_t me4000_ext_int_isr(int irq, void *dev_id)
5969 return IRQ_HANDLED; 5957 return IRQ_HANDLED;
5970} 5958}
5971 5959
5972void __exit me4000_module_exit(void) 5960static void __exit me4000_module_exit(void)
5973{ 5961{
5974 struct list_head *board_p; 5962 struct list_head *board_p;
5975 me4000_info_t *board_info; 5963 struct me4000_info *board_info;
5976 5964
5977 CALL_PDEBUG("cleanup_module() is executed\n"); 5965 CALL_PDEBUG("cleanup_module() is executed\n");
5978 5966
@@ -5993,7 +5981,7 @@ void __exit me4000_module_exit(void)
5993 /* Reset the boards */ 5981 /* Reset the boards */
5994 for (board_p = me4000_board_info_list.next; 5982 for (board_p = me4000_board_info_list.next;
5995 board_p != &me4000_board_info_list; board_p = board_p->next) { 5983 board_p != &me4000_board_info_list; board_p = board_p->next) {
5996 board_info = list_entry(board_p, me4000_info_t, list); 5984 board_info = list_entry(board_p, struct me4000_info, list);
5997 me4000_reset_board(board_info); 5985 me4000_reset_board(board_info);
5998 } 5986 }
5999 5987
@@ -6007,7 +5995,7 @@ static int me4000_read_procmem(char *buf, char **start, off_t offset, int count,
6007{ 5995{
6008 int len = 0; 5996 int len = 0;
6009 int limit = count - 1000; 5997 int limit = count - 1000;
6010 me4000_info_t *board_info; 5998 struct me4000_info *board_info;
6011 struct list_head *ptr; 5999 struct list_head *ptr;
6012 6000
6013 len += sprintf(buf + len, "\nME4000 DRIVER VERSION %X.%X.%X\n\n", 6001 len += sprintf(buf + len, "\nME4000 DRIVER VERSION %X.%X.%X\n\n",
@@ -6019,7 +6007,7 @@ static int me4000_read_procmem(char *buf, char **start, off_t offset, int count,
6019 for (ptr = me4000_board_info_list.next; 6007 for (ptr = me4000_board_info_list.next;
6020 (ptr != &me4000_board_info_list) && (len < limit); 6008 (ptr != &me4000_board_info_list) && (len < limit);
6021 ptr = ptr->next) { 6009 ptr = ptr->next) {
6022 board_info = list_entry(ptr, me4000_info_t, list); 6010 board_info = list_entry(ptr, struct me4000_info, list);
6023 6011
6024 len += 6012 len +=
6025 sprintf(buf + len, "Board number %d:\n", 6013 sprintf(buf + len, "Board number %d:\n",
@@ -6029,14 +6017,14 @@ static int me4000_read_procmem(char *buf, char **start, off_t offset, int count,
6029 sprintf(buf + len, "PLX base register = 0x%lX\n", 6017 sprintf(buf + len, "PLX base register = 0x%lX\n",
6030 board_info->plx_regbase); 6018 board_info->plx_regbase);
6031 len += 6019 len +=
6032 sprintf(buf + len, "PLX base register size = 0x%lX\n", 6020 sprintf(buf + len, "PLX base register size = 0x%X\n",
6033 board_info->plx_regbase_size); 6021 (unsigned int)board_info->plx_regbase_size);
6034 len += 6022 len +=
6035 sprintf(buf + len, "ME4000 base register = 0x%lX\n", 6023 sprintf(buf + len, "ME4000 base register = 0x%X\n",
6036 board_info->me4000_regbase); 6024 (unsigned int)board_info->me4000_regbase);
6037 len += 6025 len +=
6038 sprintf(buf + len, "ME4000 base register size = 0x%lX\n", 6026 sprintf(buf + len, "ME4000 base register size = 0x%X\n",
6039 board_info->me4000_regbase_size); 6027 (unsigned int)board_info->me4000_regbase_size);
6040 len += 6028 len +=
6041 sprintf(buf + len, "Serial number = 0x%X\n", 6029 sprintf(buf + len, "Serial number = 0x%X\n",
6042 board_info->serial_no); 6030 board_info->serial_no);
diff --git a/drivers/staging/me4000/me4000.h b/drivers/staging/me4000/me4000.h
index c35e4b9793a0..81c6f4d5e25c 100644
--- a/drivers/staging/me4000/me4000.h
+++ b/drivers/staging/me4000/me4000.h
@@ -329,46 +329,46 @@
329 Circular buffer used for analog input/output reads/writes. 329 Circular buffer used for analog input/output reads/writes.
330 ===========================================================================*/ 330 ===========================================================================*/
331 331
332typedef struct me4000_circ_buf { 332struct me4000_circ_buf {
333 s16 *buf; 333 s16 *buf;
334 int volatile head; 334 int volatile head;
335 int volatile tail; 335 int volatile tail;
336} me4000_circ_buf_t; 336};
337 337
338/*============================================================================= 338/*=============================================================================
339 Information about the hardware capabilities 339 Information about the hardware capabilities
340 ===========================================================================*/ 340 ===========================================================================*/
341 341
342typedef struct me4000_ao_info { 342struct me4000_ao_info {
343 int count; 343 int count;
344 int fifo_count; 344 int fifo_count;
345} me4000_ao_info_t; 345};
346 346
347typedef struct me4000_ai_info { 347struct me4000_ai_info {
348 int count; 348 int count;
349 int sh_count; 349 int sh_count;
350 int diff_count; 350 int diff_count;
351 int ex_trig_analog; 351 int ex_trig_analog;
352} me4000_ai_info_t; 352};
353 353
354typedef struct me4000_dio_info { 354struct me4000_dio_info {
355 int count; 355 int count;
356} me4000_dio_info_t; 356};
357 357
358typedef struct me4000_cnt_info { 358struct me4000_cnt_info {
359 int count; 359 int count;
360} me4000_cnt_info_t; 360};
361 361
362typedef struct me4000_board { 362struct me4000_board {
363 u16 vendor_id; 363 u16 vendor_id;
364 u16 device_id; 364 u16 device_id;
365 me4000_ao_info_t ao; 365 struct me4000_ao_info ao;
366 me4000_ai_info_t ai; 366 struct me4000_ai_info ai;
367 me4000_dio_info_t dio; 367 struct me4000_dio_info dio;
368 me4000_cnt_info_t cnt; 368 struct me4000_cnt_info cnt;
369} me4000_board_t; 369};
370 370
371static me4000_board_t me4000_boards[] = { 371static struct me4000_board me4000_boards[] = {
372 {PCI_VENDOR_ID_MEILHAUS, 0x4610, {0, 0}, {16, 0, 0, 0}, {4}, {3}}, 372 {PCI_VENDOR_ID_MEILHAUS, 0x4610, {0, 0}, {16, 0, 0, 0}, {4}, {3}},
373 373
374 {PCI_VENDOR_ID_MEILHAUS, 0x4650, {0, 0}, {16, 0, 0, 0}, {4}, {0}}, 374 {PCI_VENDOR_ID_MEILHAUS, 0x4650, {0, 0}, {16, 0, 0, 0}, {4}, {0}},
@@ -391,8 +391,6 @@ static me4000_board_t me4000_boards[] = {
391 {0}, 391 {0},
392}; 392};
393 393
394#define ME4000_BOARD_VERSIONS (sizeof(me4000_boards) / sizeof(me4000_board_t) - 1)
395
396/*============================================================================= 394/*=============================================================================
397 PCI device table. 395 PCI device table.
398 This is used by modprobe to translate PCI IDs to drivers. 396 This is used by modprobe to translate PCI IDs to drivers.
@@ -427,19 +425,19 @@ MODULE_DEVICE_TABLE(pci, me4000_pci_table);
427 Global board and subdevice information structures 425 Global board and subdevice information structures
428 ===========================================================================*/ 426 ===========================================================================*/
429 427
430typedef struct me4000_info { 428struct me4000_info {
431 struct list_head list; // List of all detected boards 429 struct list_head list; // List of all detected boards
432 int board_count; // Index of the board after detection 430 int board_count; // Index of the board after detection
433 431
434 unsigned long plx_regbase; // PLX configuration space base address 432 unsigned long plx_regbase; // PLX configuration space base address
435 unsigned long me4000_regbase; // Base address of the ME4000 433 resource_size_t me4000_regbase; // Base address of the ME4000
436 unsigned long timer_regbase; // Base address of the timer circuit 434 resource_size_t timer_regbase; // Base address of the timer circuit
437 unsigned long program_regbase; // Base address to set the program pin for the xilinx 435 resource_size_t program_regbase; // Base address to set the program pin for the xilinx
438 436
439 unsigned long plx_regbase_size; // PLX register set space 437 unsigned long plx_regbase_size; // PLX register set space
440 unsigned long me4000_regbase_size; // ME4000 register set space 438 resource_size_t me4000_regbase_size; // ME4000 register set space
441 unsigned long timer_regbase_size; // Timer circuit register set space 439 resource_size_t timer_regbase_size; // Timer circuit register set space
442 unsigned long program_regbase_size; // Size of program base address of the ME4000 440 resource_size_t program_regbase_size; // Size of program base address of the ME4000
443 441
444 unsigned int serial_no; // Serial number of the board 442 unsigned int serial_no; // Serial number of the board
445 unsigned char hw_revision; // Hardware revision of the board 443 unsigned char hw_revision; // Hardware revision of the board
@@ -451,7 +449,7 @@ typedef struct me4000_info {
451 int pci_func_no; // PCI function number 449 int pci_func_no; // PCI function number
452 struct pci_dev *pci_dev_p; // General PCI information 450 struct pci_dev *pci_dev_p; // General PCI information
453 451
454 me4000_board_t *board_p; // Holds the board capabilities 452 struct me4000_board *board_p; // Holds the board capabilities
455 453
456 unsigned int irq; // IRQ assigned from the PCI BIOS 454 unsigned int irq; // IRQ assigned from the PCI BIOS
457 unsigned int irq_count; // Count of external interrupts 455 unsigned int irq_count; // Count of external interrupts
@@ -464,18 +462,18 @@ typedef struct me4000_info {
464 struct me4000_dio_context *dio_context; // Digital I/O specific context 462 struct me4000_dio_context *dio_context; // Digital I/O specific context
465 struct me4000_cnt_context *cnt_context; // Counter specific context 463 struct me4000_cnt_context *cnt_context; // Counter specific context
466 struct me4000_ext_int_context *ext_int_context; // External interrupt specific context 464 struct me4000_ext_int_context *ext_int_context; // External interrupt specific context
467} me4000_info_t; 465};
468 466
469typedef struct me4000_ao_context { 467struct me4000_ao_context {
470 struct list_head list; // linked list of me4000_ao_context_t 468 struct list_head list; // linked list of me4000_ao_context_t
471 int index; // Index in the list 469 int index; // Index in the list
472 int mode; // Indicates mode (0 = single, 1 = wraparound, 2 = continous) 470 int mode; // Indicates mode (0 = single, 1 = wraparound, 2 = continous)
473 int dac_in_use; // Indicates if already opend 471 int dac_in_use; // Indicates if already opend
474 spinlock_t use_lock; // Guards in_use 472 spinlock_t use_lock; // Guards in_use
475 spinlock_t int_lock; // Used when locking out interrupts 473 spinlock_t int_lock; // Used when locking out interrupts
476 me4000_circ_buf_t circ_buf; // Circular buffer 474 struct me4000_circ_buf circ_buf; // Circular buffer
477 wait_queue_head_t wait_queue; // Wait queue to sleep while blocking write 475 wait_queue_head_t wait_queue; // Wait queue to sleep while blocking write
478 me4000_info_t *board_info; 476 struct me4000_info *board_info;
479 unsigned int irq; // The irq associated with this ADC 477 unsigned int irq; // The irq associated with this ADC
480 int volatile pipe_flag; // Indicates broken pipe set from me4000_ao_isr() 478 int volatile pipe_flag; // Indicates broken pipe set from me4000_ao_isr()
481 unsigned long ctrl_reg; 479 unsigned long ctrl_reg;
@@ -486,9 +484,9 @@ typedef struct me4000_ao_context {
486 unsigned long irq_status_reg; 484 unsigned long irq_status_reg;
487 unsigned long preload_reg; 485 unsigned long preload_reg;
488 struct fasync_struct *fasync_p; // Queue for asynchronous notification 486 struct fasync_struct *fasync_p; // Queue for asynchronous notification
489} me4000_ao_context_t; 487};
490 488
491typedef struct me4000_ai_context { 489struct me4000_ai_context {
492 struct list_head list; // linked list of me4000_ai_info_t 490 struct list_head list; // linked list of me4000_ai_info_t
493 int mode; // Indicates mode 491 int mode; // Indicates mode
494 int in_use; // Indicates if already opend 492 int in_use; // Indicates if already opend
@@ -496,9 +494,9 @@ typedef struct me4000_ai_context {
496 spinlock_t int_lock; // Used when locking out interrupts 494 spinlock_t int_lock; // Used when locking out interrupts
497 int number; // Number of the DAC 495 int number; // Number of the DAC
498 unsigned int irq; // The irq associated with this ADC 496 unsigned int irq; // The irq associated with this ADC
499 me4000_circ_buf_t circ_buf; // Circular buffer 497 struct me4000_circ_buf circ_buf; // Circular buffer
500 wait_queue_head_t wait_queue; // Wait queue to sleep while blocking read 498 wait_queue_head_t wait_queue; // Wait queue to sleep while blocking read
501 me4000_info_t *board_info; 499 struct me4000_info *board_info;
502 500
503 struct fasync_struct *fasync_p; // Queue for asynchronous notification 501 struct fasync_struct *fasync_p; // Queue for asynchronous notification
504 502
@@ -523,48 +521,48 @@ typedef struct me4000_ai_context {
523 unsigned long channel_list_count; 521 unsigned long channel_list_count;
524 unsigned long sample_counter; 522 unsigned long sample_counter;
525 int sample_counter_reload; 523 int sample_counter_reload;
526} me4000_ai_context_t; 524};
527 525
528typedef struct me4000_dio_context { 526struct me4000_dio_context {
529 struct list_head list; // linked list of me4000_dio_context_t 527 struct list_head list; // linked list of me4000_dio_context_t
530 int in_use; // Indicates if already opend 528 int in_use; // Indicates if already opend
531 spinlock_t use_lock; // Guards in_use 529 spinlock_t use_lock; // Guards in_use
532 int number; 530 int number;
533 int dio_count; 531 int dio_count;
534 me4000_info_t *board_info; 532 struct me4000_info *board_info;
535 unsigned long dir_reg; 533 unsigned long dir_reg;
536 unsigned long ctrl_reg; 534 unsigned long ctrl_reg;
537 unsigned long port_0_reg; 535 unsigned long port_0_reg;
538 unsigned long port_1_reg; 536 unsigned long port_1_reg;
539 unsigned long port_2_reg; 537 unsigned long port_2_reg;
540 unsigned long port_3_reg; 538 unsigned long port_3_reg;
541} me4000_dio_context_t; 539};
542 540
543typedef struct me4000_cnt_context { 541struct me4000_cnt_context {
544 struct list_head list; // linked list of me4000_dio_context_t 542 struct list_head list; // linked list of me4000_dio_context_t
545 int in_use; // Indicates if already opend 543 int in_use; // Indicates if already opend
546 spinlock_t use_lock; // Guards in_use 544 spinlock_t use_lock; // Guards in_use
547 int number; 545 int number;
548 int cnt_count; 546 int cnt_count;
549 me4000_info_t *board_info; 547 struct me4000_info *board_info;
550 unsigned long ctrl_reg; 548 unsigned long ctrl_reg;
551 unsigned long counter_0_reg; 549 unsigned long counter_0_reg;
552 unsigned long counter_1_reg; 550 unsigned long counter_1_reg;
553 unsigned long counter_2_reg; 551 unsigned long counter_2_reg;
554} me4000_cnt_context_t; 552};
555 553
556typedef struct me4000_ext_int_context { 554struct me4000_ext_int_context {
557 struct list_head list; // linked list of me4000_dio_context_t 555 struct list_head list; // linked list of me4000_dio_context_t
558 int in_use; // Indicates if already opend 556 int in_use; // Indicates if already opend
559 spinlock_t use_lock; // Guards in_use 557 spinlock_t use_lock; // Guards in_use
560 int number; 558 int number;
561 me4000_info_t *board_info; 559 struct me4000_info *board_info;
562 unsigned int irq; 560 unsigned int irq;
563 unsigned long int_count; 561 unsigned long int_count;
564 struct fasync_struct *fasync_ptr; 562 struct fasync_struct *fasync_ptr;
565 unsigned long ctrl_reg; 563 unsigned long ctrl_reg;
566 unsigned long irq_status_reg; 564 unsigned long irq_status_reg;
567} me4000_ext_int_context_t; 565};
568 566
569#endif 567#endif
570 568
@@ -745,12 +743,12 @@ typedef struct me4000_ext_int_context {
745 General type definitions 743 General type definitions
746 ----------------------------------------------------------------------------*/ 744 ----------------------------------------------------------------------------*/
747 745
748typedef struct me4000_user_info { 746struct me4000_user_info {
749 int board_count; // Index of the board after detection 747 int board_count; // Index of the board after detection
750 unsigned long plx_regbase; // PLX configuration space base address 748 unsigned long plx_regbase; // PLX configuration space base address
751 unsigned long me4000_regbase; // Base address of the ME4000 749 resource_size_t me4000_regbase; // Base address of the ME4000
752 unsigned long plx_regbase_size; // PLX register set space 750 unsigned long plx_regbase_size; // PLX register set space
753 unsigned long me4000_regbase_size; // ME4000 register set space 751 resource_size_t me4000_regbase_size; // ME4000 register set space
754 unsigned long serial_no; // Serial number of the board 752 unsigned long serial_no; // Serial number of the board
755 unsigned char hw_revision; // Hardware revision of the board 753 unsigned char hw_revision; // Hardware revision of the board
756 unsigned short vendor_id; // Meilhaus vendor id (0x1402) 754 unsigned short vendor_id; // Meilhaus vendor id (0x1402)
@@ -773,62 +771,62 @@ typedef struct me4000_user_info {
773 int dio_count; // Count of digital I/O ports 771 int dio_count; // Count of digital I/O ports
774 772
775 int cnt_count; // Count of counters 773 int cnt_count; // Count of counters
776} me4000_user_info_t; 774};
777 775
778/*----------------------------------------------------------------------------- 776/*-----------------------------------------------------------------------------
779 Type definitions for analog output 777 Type definitions for analog output
780 ----------------------------------------------------------------------------*/ 778 ----------------------------------------------------------------------------*/
781 779
782typedef struct me4000_ao_channel_list { 780struct me4000_ao_channel_list {
783 unsigned long count; 781 unsigned long count;
784 unsigned long *list; 782 unsigned long *list;
785} me4000_ao_channel_list_t; 783};
786 784
787/*----------------------------------------------------------------------------- 785/*-----------------------------------------------------------------------------
788 Type definitions for analog input 786 Type definitions for analog input
789 ----------------------------------------------------------------------------*/ 787 ----------------------------------------------------------------------------*/
790 788
791typedef struct me4000_ai_channel_list { 789struct me4000_ai_channel_list {
792 unsigned long count; 790 unsigned long count;
793 unsigned long *list; 791 unsigned long *list;
794} me4000_ai_channel_list_t; 792};
795 793
796typedef struct me4000_ai_timer { 794struct me4000_ai_timer {
797 unsigned long pre_chan; 795 unsigned long pre_chan;
798 unsigned long chan; 796 unsigned long chan;
799 unsigned long scan_low; 797 unsigned long scan_low;
800 unsigned long scan_high; 798 unsigned long scan_high;
801} me4000_ai_timer_t; 799};
802 800
803typedef struct me4000_ai_config { 801struct me4000_ai_config {
804 me4000_ai_timer_t timer; 802 struct me4000_ai_timer timer;
805 me4000_ai_channel_list_t channel_list; 803 struct me4000_ai_channel_list channel_list;
806 int sh; 804 int sh;
807} me4000_ai_config_t; 805};
808 806
809typedef struct me4000_ai_single { 807struct me4000_ai_single {
810 int channel; 808 int channel;
811 int range; 809 int range;
812 int mode; 810 int mode;
813 short value; 811 short value;
814 unsigned long timeout; 812 unsigned long timeout;
815} me4000_ai_single_t; 813};
816 814
817typedef struct me4000_ai_trigger { 815struct me4000_ai_trigger {
818 int mode; 816 int mode;
819 int edge; 817 int edge;
820} me4000_ai_trigger_t; 818};
821 819
822typedef struct me4000_ai_sc { 820struct me4000_ai_sc {
823 unsigned long value; 821 unsigned long value;
824 int reload; 822 int reload;
825} me4000_ai_sc_t; 823};
826 824
827/*----------------------------------------------------------------------------- 825/*-----------------------------------------------------------------------------
828 Type definitions for eeprom 826 Type definitions for eeprom
829 ----------------------------------------------------------------------------*/ 827 ----------------------------------------------------------------------------*/
830 828
831typedef struct me4000_eeprom { 829struct me4000_eeprom {
832 unsigned long date; 830 unsigned long date;
833 short uni_10_offset; 831 short uni_10_offset;
834 short uni_10_fullscale; 832 short uni_10_fullscale;
@@ -842,45 +840,45 @@ typedef struct me4000_eeprom {
842 short diff_10_fullscale; 840 short diff_10_fullscale;
843 short diff_2_5_offset; 841 short diff_2_5_offset;
844 short diff_2_5_fullscale; 842 short diff_2_5_fullscale;
845} me4000_eeprom_t; 843};
846 844
847/*----------------------------------------------------------------------------- 845/*-----------------------------------------------------------------------------
848 Type definitions for digital I/O 846 Type definitions for digital I/O
849 ----------------------------------------------------------------------------*/ 847 ----------------------------------------------------------------------------*/
850 848
851typedef struct me4000_dio_config { 849struct me4000_dio_config {
852 int port; 850 int port;
853 int mode; 851 int mode;
854 int function; 852 int function;
855} me4000_dio_config_t; 853};
856 854
857typedef struct me4000_dio_byte { 855struct me4000_dio_byte {
858 int port; 856 int port;
859 unsigned char byte; 857 unsigned char byte;
860} me4000_dio_byte_t; 858};
861 859
862/*----------------------------------------------------------------------------- 860/*-----------------------------------------------------------------------------
863 Type definitions for counters 861 Type definitions for counters
864 ----------------------------------------------------------------------------*/ 862 ----------------------------------------------------------------------------*/
865 863
866typedef struct me4000_cnt { 864struct me4000_cnt {
867 int counter; 865 int counter;
868 unsigned short value; 866 unsigned short value;
869} me4000_cnt_t; 867};
870 868
871typedef struct me4000_cnt_config { 869struct me4000_cnt_config {
872 int counter; 870 int counter;
873 int mode; 871 int mode;
874} me4000_cnt_config_t; 872};
875 873
876/*----------------------------------------------------------------------------- 874/*-----------------------------------------------------------------------------
877 Type definitions for external interrupt 875 Type definitions for external interrupt
878 ----------------------------------------------------------------------------*/ 876 ----------------------------------------------------------------------------*/
879 877
880typedef struct { 878struct me4000_int {
881 int int1_count; 879 int int1_count;
882 int int2_count; 880 int int2_count;
883} me4000_int_type; 881};
884 882
885/*----------------------------------------------------------------------------- 883/*-----------------------------------------------------------------------------
886 The ioctls of the board 884 The ioctls of the board
@@ -888,7 +886,8 @@ typedef struct {
888 886
889#define ME4000_IOCTL_MAXNR 50 887#define ME4000_IOCTL_MAXNR 50
890#define ME4000_MAGIC 'y' 888#define ME4000_MAGIC 'y'
891#define ME4000_GET_USER_INFO _IOR (ME4000_MAGIC, 0, me4000_user_info_t) 889#define ME4000_GET_USER_INFO _IOR (ME4000_MAGIC, 0, \
890 struct me4000_user_info)
892 891
893#define ME4000_AO_START _IOW (ME4000_MAGIC, 1, unsigned long) 892#define ME4000_AO_START _IOW (ME4000_MAGIC, 1, unsigned long)
894#define ME4000_AO_STOP _IO (ME4000_MAGIC, 2) 893#define ME4000_AO_STOP _IO (ME4000_MAGIC, 2)
@@ -904,25 +903,35 @@ typedef struct {
904#define ME4000_AO_DISABLE_DO _IO (ME4000_MAGIC, 12) 903#define ME4000_AO_DISABLE_DO _IO (ME4000_MAGIC, 12)
905#define ME4000_AO_FSM_STATE _IOR (ME4000_MAGIC, 13, int) 904#define ME4000_AO_FSM_STATE _IOR (ME4000_MAGIC, 13, int)
906 905
907#define ME4000_AI_SINGLE _IOR (ME4000_MAGIC, 14, me4000_ai_single_t) 906#define ME4000_AI_SINGLE _IOR (ME4000_MAGIC, 14, \
907 struct me4000_ai_single)
908#define ME4000_AI_START _IOW (ME4000_MAGIC, 15, unsigned long) 908#define ME4000_AI_START _IOW (ME4000_MAGIC, 15, unsigned long)
909#define ME4000_AI_STOP _IO (ME4000_MAGIC, 16) 909#define ME4000_AI_STOP _IO (ME4000_MAGIC, 16)
910#define ME4000_AI_IMMEDIATE_STOP _IO (ME4000_MAGIC, 17) 910#define ME4000_AI_IMMEDIATE_STOP _IO (ME4000_MAGIC, 17)
911#define ME4000_AI_EX_TRIG_ENABLE _IO (ME4000_MAGIC, 18) 911#define ME4000_AI_EX_TRIG_ENABLE _IO (ME4000_MAGIC, 18)
912#define ME4000_AI_EX_TRIG_DISABLE _IO (ME4000_MAGIC, 19) 912#define ME4000_AI_EX_TRIG_DISABLE _IO (ME4000_MAGIC, 19)
913#define ME4000_AI_EX_TRIG_SETUP _IOW (ME4000_MAGIC, 20, me4000_ai_trigger_t) 913#define ME4000_AI_EX_TRIG_SETUP _IOW (ME4000_MAGIC, 20, \
914#define ME4000_AI_CONFIG _IOW (ME4000_MAGIC, 21, me4000_ai_config_t) 914 struct me4000_ai_trigger)
915#define ME4000_AI_SC_SETUP _IOW (ME4000_MAGIC, 22, me4000_ai_sc_t) 915#define ME4000_AI_CONFIG _IOW (ME4000_MAGIC, 21, \
916 struct me4000_ai_config)
917#define ME4000_AI_SC_SETUP _IOW (ME4000_MAGIC, 22, \
918 struct me4000_ai_sc)
916#define ME4000_AI_FSM_STATE _IOR (ME4000_MAGIC, 23, int) 919#define ME4000_AI_FSM_STATE _IOR (ME4000_MAGIC, 23, int)
917 920
918#define ME4000_DIO_CONFIG _IOW (ME4000_MAGIC, 24, me4000_dio_config_t) 921#define ME4000_DIO_CONFIG _IOW (ME4000_MAGIC, 24, \
919#define ME4000_DIO_GET_BYTE _IOR (ME4000_MAGIC, 25, me4000_dio_byte_t) 922 struct me4000_dio_config)
920#define ME4000_DIO_SET_BYTE _IOW (ME4000_MAGIC, 26, me4000_dio_byte_t) 923#define ME4000_DIO_GET_BYTE _IOR (ME4000_MAGIC, 25, \
924 struct me4000_dio_byte)
925#define ME4000_DIO_SET_BYTE _IOW (ME4000_MAGIC, 26, \
926 struct me4000_dio_byte)
921#define ME4000_DIO_RESET _IO (ME4000_MAGIC, 27) 927#define ME4000_DIO_RESET _IO (ME4000_MAGIC, 27)
922 928
923#define ME4000_CNT_READ _IOR (ME4000_MAGIC, 28, me4000_cnt_t) 929#define ME4000_CNT_READ _IOR (ME4000_MAGIC, 28, \
924#define ME4000_CNT_WRITE _IOW (ME4000_MAGIC, 29, me4000_cnt_t) 930 struct me4000_cnt)
925#define ME4000_CNT_CONFIG _IOW (ME4000_MAGIC, 30, me4000_cnt_config_t) 931#define ME4000_CNT_WRITE _IOW (ME4000_MAGIC, 29, \
932 struct me4000_cnt)
933#define ME4000_CNT_CONFIG _IOW (ME4000_MAGIC, 30, \
934 struct me4000_cnt_config)
926#define ME4000_CNT_RESET _IO (ME4000_MAGIC, 31) 935#define ME4000_CNT_RESET _IO (ME4000_MAGIC, 31)
927 936
928#define ME4000_EXT_INT_DISABLE _IO (ME4000_MAGIC, 32) 937#define ME4000_EXT_INT_DISABLE _IO (ME4000_MAGIC, 32)
@@ -934,13 +943,16 @@ typedef struct {
934#define ME4000_AI_FULLSCALE_ENABLE _IO (ME4000_MAGIC, 37) 943#define ME4000_AI_FULLSCALE_ENABLE _IO (ME4000_MAGIC, 37)
935#define ME4000_AI_FULLSCALE_DISABLE _IO (ME4000_MAGIC, 38) 944#define ME4000_AI_FULLSCALE_DISABLE _IO (ME4000_MAGIC, 38)
936 945
937#define ME4000_AI_EEPROM_READ _IOR (ME4000_MAGIC, 39, me4000_eeprom_t) 946#define ME4000_AI_EEPROM_READ _IOR (ME4000_MAGIC, 39, \
938#define ME4000_AI_EEPROM_WRITE _IOW (ME4000_MAGIC, 40, me4000_eeprom_t) 947 struct me4000_eeprom)
948#define ME4000_AI_EEPROM_WRITE _IOW (ME4000_MAGIC, 40, \
949 struct me4000_eeprom)
939 950
940#define ME4000_AO_SIMULTANEOUS_EX_TRIG _IO (ME4000_MAGIC, 41) 951#define ME4000_AO_SIMULTANEOUS_EX_TRIG _IO (ME4000_MAGIC, 41)
941#define ME4000_AO_SIMULTANEOUS_SW _IO (ME4000_MAGIC, 42) 952#define ME4000_AO_SIMULTANEOUS_SW _IO (ME4000_MAGIC, 42)
942#define ME4000_AO_SIMULTANEOUS_DISABLE _IO (ME4000_MAGIC, 43) 953#define ME4000_AO_SIMULTANEOUS_DISABLE _IO (ME4000_MAGIC, 43)
943#define ME4000_AO_SIMULTANEOUS_UPDATE _IOW (ME4000_MAGIC, 44, me4000_ao_channel_list_t) 954#define ME4000_AO_SIMULTANEOUS_UPDATE _IOW (ME4000_MAGIC, 44, \
955 struct me4000_ao_channel_list)
944 956
945#define ME4000_AO_SYNCHRONOUS_EX_TRIG _IO (ME4000_MAGIC, 45) 957#define ME4000_AO_SYNCHRONOUS_EX_TRIG _IO (ME4000_MAGIC, 45)
946#define ME4000_AO_SYNCHRONOUS_SW _IO (ME4000_MAGIC, 46) 958#define ME4000_AO_SYNCHRONOUS_SW _IO (ME4000_MAGIC, 46)
diff --git a/drivers/staging/pcc-acpi/Kconfig b/drivers/staging/pcc-acpi/Kconfig
new file mode 100644
index 000000000000..6720d4086baf
--- /dev/null
+++ b/drivers/staging/pcc-acpi/Kconfig
@@ -0,0 +1,11 @@
1config PCC_ACPI
2 tristate "Panasonic ACPI Hotkey support"
3 depends on ACPI
4 default n
5 ---help---
6 This driver provides support for Panasonic hotkeys through the
7 ACPI interface. This works for the Panasonic R1 (N variant),
8 R2, R3, T2, W2, and Y2 laptops.
9
10 To compile this driver as a module, choose M here. The module
11 will be called pcc-acpi.
diff --git a/drivers/staging/pcc-acpi/Makefile b/drivers/staging/pcc-acpi/Makefile
new file mode 100644
index 000000000000..f93b29edf61e
--- /dev/null
+++ b/drivers/staging/pcc-acpi/Makefile
@@ -0,0 +1 @@
obj-$(CONFIG_PCC_ACPI) += pcc-acpi.o
diff --git a/drivers/staging/pcc-acpi/TODO b/drivers/staging/pcc-acpi/TODO
new file mode 100644
index 000000000000..fab240982286
--- /dev/null
+++ b/drivers/staging/pcc-acpi/TODO
@@ -0,0 +1,7 @@
1TODO:
2 - Lindent fixes
3 - checkpatch.pl fixes
4 - verify that the acpi interface is correct
5 - remove /proc dependancy if needed (not sure yet.)
6
7Please send any patches for this driver to Greg Kroah-Hartman <greg@kroah.com>
diff --git a/drivers/staging/pcc-acpi/pcc-acpi.c b/drivers/staging/pcc-acpi/pcc-acpi.c
new file mode 100644
index 000000000000..7715c31f2731
--- /dev/null
+++ b/drivers/staging/pcc-acpi/pcc-acpi.c
@@ -0,0 +1,1111 @@
1/*
2 * Panasonic HotKey and lcd brightness control Extra driver
3 * (C) 2004 Hiroshi Miura <miura@da-cha.org>
4 * (C) 2004 NTT DATA Intellilink Co. http://www.intellilink.co.jp/
5 * (C) YOKOTA Hiroshi <yokota (at) netlab. is. tsukuba. ac. jp>
6 * (C) 2004 David Bronaugh <dbronaugh>
7 *
8 * derived from toshiba_acpi.c, Copyright (C) 2002-2004 John Belmonte
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * publicshed by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 *
23 *---------------------------------------------------------------------------
24 *
25 * ChangeLog:
26 *
27 * Nov.04, 2006 Hiroshi Miura <miura@da-cha.org>
28 * -v0.9 remove warning about section reference.
29 * remove acpi_os_free
30 * add /proc/acpi/pcc/brightness interface to
31 * allow HAL to access.
32 * merge dbronaugh's enhancement
33 * Aug.17, 2004 David Bronaugh (dbronaugh)
34 * - Added screen brightness setting interface
35 * Thanks to the FreeBSD crew
36 * (acpi_panasonic.c authors)
37 * for the ideas I needed to accomplish it
38 *
39 * May.29, 2006 Hiroshi Miura <miura@da-cha.org>
40 * -v0.8.4 follow to change keyinput structure
41 * thanks Fabian Yamaguchi <fabs@cs.tu-berlin.de>,
42 * Jacob Bower <jacob.bower@ic.ac.uk> and
43 * Hiroshi Yokota for providing solutions.
44 *
45 * Oct.02, 2004 Hiroshi Miura <miura@da-cha.org>
46 * -v0.8.2 merge code of YOKOTA Hiroshi
47 * <yokota@netlab.is.tsukuba.ac.jp>.
48 * Add sticky key mode interface.
49 * Refactoring acpi_pcc_generete_keyinput().
50 *
51 * Sep.15, 2004 Hiroshi Miura <miura@da-cha.org>
52 * -v0.8 Generate key input event on input subsystem.
53 * This is based on yet another driver
54 * written by Ryuta Nakanishi.
55 *
56 * Sep.10, 2004 Hiroshi Miura <miura@da-cha.org>
57 * -v0.7 Change proc interface functions using seq_file
58 * facility as same as other ACPI drivers.
59 *
60 * Aug.28, 2004 Hiroshi Miura <miura@da-cha.org>
61 * -v0.6.4 Fix a silly error with status checking
62 *
63 * Aug.25, 2004 Hiroshi Miura <miura@da-cha.org>
64 * -v0.6.3 replace read_acpi_int by standard
65 * function acpi_evaluate_integer
66 * some clean up and make smart copyright notice.
67 * fix return value of pcc_acpi_get_key()
68 * fix checking return value of acpi_bus_register_driver()
69 *
70 * Aug.22, 2004 David Bronaugh <dbronaugh@linuxboxen.org>
71 * -v0.6.2 Add check on ACPI data (num_sifr)
72 * Coding style cleanups, better error messages/handling
73 * Fixed an off-by-one error in memory allocation
74 *
75 * Aug.21, 2004 David Bronaugh <dbronaugh@linuxboxen.org>
76 * -v0.6.1 Fix a silly error with status checking
77 *
78 * Aug.20, 2004 David Bronaugh <dbronaugh@linuxboxen.org>
79 * - v0.6 Correct brightness controls to reflect reality
80 * based on information gleaned by Hiroshi Miura
81 * and discussions with Hiroshi Miura
82 *
83 * Aug.10, 2004 Hiroshi Miura <miura@da-cha.org>
84 * - v0.5 support LCD brightness control
85 * based on the disclosed information by MEI.
86 *
87 * Jul.25, 2004 Hiroshi Miura <miura@da-cha.org>
88 * - v0.4 first post version
89 * add function to retrive SIFR
90 *
91 * Jul.24, 2004 Hiroshi Miura <miura@da-cha.org>
92 * - v0.3 get proper status of hotkey
93 *
94 * Jul.22, 2004 Hiroshi Miura <miura@da-cha.org>
95 * - v0.2 add HotKey handler
96 *
97 * Jul.17, 2004 Hiroshi Miura <miura@da-cha.org>
98 * - v0.1 start from toshiba_acpi driver written by John Belmonte
99 *
100 */
101
102#define ACPI_PCC_VERSION "0.9+hy"
103
104#include <linux/kernel.h>
105#include <linux/module.h>
106#include <linux/types.h>
107#include <linux/ctype.h>
108#include <linux/init.h>
109#include <linux/input.h>
110#include <linux/proc_fs.h>
111#include <linux/seq_file.h>
112#include <linux/slab.h>
113#include <linux/uaccess.h>
114#include <acpi/acpi_bus.h>
115#include <acpi/acpi_drivers.h>
116
117
118/*************************************************************************
119 * "seq" file template definition.
120 */
121/* "seq" initializer */
122#define SEQ_OPEN_FS(_open_func_name_, _show_func_name_) \
123static int _open_func_name_(struct inode *inode, struct file *file) \
124{ \
125 return single_open(file, _show_func_name_, PDE(inode)->data); \
126}
127
128/*-------------------------------------------------------------------------
129 * "seq" fops template for read-only files.
130 */
131#define SEQ_FILEOPS_R(_open_func_name_) \
132{ \
133 .open = _open_func_name_, \
134 .read = seq_read, \
135 .llseek = seq_lseek, \
136 .release = single_release, \
137}
138
139/*------------------------------------------------------------------------
140 * "seq" fops template for read-write files.
141 */
142#define SEQ_FILEOPS_RW(_open_func_name_, _write_func_name_) \
143{ \
144 .open = _open_func_name_ , \
145 .read = seq_read, \
146 .write = _write_func_name_, \
147 .llseek = seq_lseek, \
148 .release = single_release, \
149}
150
151/*
152 * "seq" file template definition ended.
153 ***************************************************************************
154 */
155#ifndef ACPI_HOTKEY_COMPONENT
156#define ACPI_HOTKEY_COMPONENT 0x10000000
157#endif
158
159#define _COMPONENT ACPI_HOTKEY_COMPONENT
160ACPI_MODULE_NAME("pcc_acpi");
161
162MODULE_AUTHOR("Hiroshi Miura, Hiroshi Yokota");
163MODULE_DESCRIPTION("ACPI HotKey driver for Panasonic Let's Note laptops");
164MODULE_LICENSE("GPL");
165
166#define LOGPREFIX "pcc_acpi: "
167
168/****************************************************
169 * Define ACPI PATHs
170 ****************************************************/
171/* Lets note hotkeys */
172#define METHOD_HKEY_QUERY "HINF"
173#define METHOD_HKEY_SQTY "SQTY"
174#define METHOD_HKEY_SINF "SINF"
175#define METHOD_HKEY_SSET "SSET"
176#define HKEY_NOTIFY 0x80
177
178/* for brightness control */
179#define LCD_MAX_BRIGHTNESS 255
180/* This may be magical -- beware */
181#define LCD_BRIGHTNESS_INCREMENT 17
182/* Registers of SINF */
183#define SINF_LCD_BRIGHTNESS 4
184
185/*******************************************************************
186 *
187 * definitions for /proc/ interface
188 *
189 *******************************************************************/
190#define ACPI_PCC_DRIVER_NAME "pcc_acpi"
191#define ACPI_PCC_DEVICE_NAME "PCCExtra"
192#define ACPI_PCC_CLASS "pcc"
193#define PROC_PCC ACPI_PCC_CLASS
194
195#define ACPI_PCC_INPUT_PHYS "panasonic/hkey0"
196
197/* This is transitional definition */
198#ifndef KEY_BATT
199# define KEY_BATT 227
200#endif
201
202#define PROC_STR_MAX_LEN 8
203
204#define BUS_PCC_HOTKEY BUS_I8042 /*0x1a*/ /* FIXME: BUS_I8042? */
205
206/* Fn+F4/F5 confricts with Shift+F1/F2 */
207/* This hack avoids key number confrict */
208#define PCC_KEYINPUT_MODE (0)
209
210/* LCD_TYPEs: 0 = Normal, 1 = Semi-transparent
211 ENV_STATEs: Normal temp=0x01, High temp=0x81, N/A=0x00
212*/
213enum SINF_BITS { SINF_NUM_BATTERIES = 0,
214 SINF_LCD_TYPE,
215 SINF_AC_MAX_BRIGHT,
216 SINF_AC_MIN_BRIGHT,
217 SINF_AC_CUR_BRIGHT,
218 /* 4 = R1 only handle SINF_AC_CUR_BRIGHT
219 * as SINF_CUR_BRIGHT and don't know AC state */
220 SINF_DC_MAX_BRIGHT,
221 SINF_DC_MIN_BRIGHT,
222 SINF_DC_CUR_BRIGHT,
223 SINF_MUTE,
224 SINF_RESERVED,
225 SINF_ENV_STATE, /* 10 */
226 SINF_STICKY_KEY = 0x80,
227};
228
229static struct acpi_device_id pcc_device_ids[] = {
230 {"MAT0012", 0},
231 {"MAT0013", 0},
232 {"MAT0018", 0},
233 {"MAT0019", 0},
234 {"", 0},
235};
236MODULE_DEVICE_TABLE(acpi, pcc_device_ids);
237
238
239static int __devinit acpi_pcc_hotkey_add(struct acpi_device *device);
240static int __devexit acpi_pcc_hotkey_remove(struct acpi_device *device,
241 int type);
242static int acpi_pcc_hotkey_resume(struct acpi_device *device);
243
244
245static struct acpi_driver acpi_pcc_driver = {
246 .name = ACPI_PCC_DRIVER_NAME,
247 .class = ACPI_PCC_CLASS,
248 .ids = pcc_device_ids,
249 .ops = {
250 .add = acpi_pcc_hotkey_add,
251 .remove = __devexit_p(acpi_pcc_hotkey_remove),
252#ifdef CONFIG_PM
253 /*.suspend = acpi_pcc_hotkey_suspend,*/
254 .resume = acpi_pcc_hotkey_resume,
255#endif
256 },
257};
258
259struct acpi_hotkey {
260 acpi_handle handle;
261 struct acpi_device *device;
262 struct proc_dir_entry *proc_dir_entry;
263 unsigned long num_sifr;
264 unsigned long status;
265 struct input_dev *input_dev;
266 int sticky_mode;
267};
268
269struct pcc_keyinput {
270 struct acpi_hotkey *hotkey;
271 int key_mode;
272};
273
274/* *************************************************************************
275 Hotkey driver core
276 ************************************************************************* */
277/* -------------------------------------------------------------------------
278 method access functions
279 ------------------------------------------------------------------------- */
280static int acpi_pcc_write_sset(struct acpi_hotkey *hotkey, int func, int val)
281{
282 union acpi_object in_objs[] = {
283 { .integer.type = ACPI_TYPE_INTEGER,
284 .integer.value = func, },
285 { .integer.type = ACPI_TYPE_INTEGER,
286 .integer.value = val, },
287 };
288 struct acpi_object_list params = {
289 .count = ARRAY_SIZE(in_objs),
290 .pointer = in_objs,
291 };
292 acpi_status status;
293
294 ACPI_FUNCTION_TRACE("acpi_pcc_write_sset");
295
296 status = acpi_evaluate_object(hotkey->handle, METHOD_HKEY_SSET,
297 &params, NULL);
298
299 return_VALUE(status == AE_OK ? AE_OK : AE_ERROR);
300}
301
302static inline int acpi_pcc_get_sqty(struct acpi_device *device)
303{
304 unsigned long s;
305 acpi_status status;
306
307 ACPI_FUNCTION_TRACE("acpi_pcc_get_sqty");
308
309 status = acpi_evaluate_integer(device->handle, METHOD_HKEY_SQTY,
310 NULL, &s);
311 if (ACPI_SUCCESS(status)) {
312 return_VALUE(s);
313 } else {
314 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
315 "evaluation error HKEY.SQTY\n"));
316 return_VALUE(-EINVAL);
317 }
318}
319
320static int acpi_pcc_retrieve_biosdata(struct acpi_hotkey *hotkey, u32 *sinf)
321{
322 acpi_status status;
323 struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
324 union acpi_object *hkey = NULL;
325 int i;
326
327 ACPI_FUNCTION_TRACE("acpi_pcc_retrieve_biosdata");
328
329 status = acpi_evaluate_object(hotkey->handle, METHOD_HKEY_SINF, 0,
330 &buffer);
331 if (ACPI_FAILURE(status)) {
332 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
333 "evaluation error HKEY.SINF\n"));
334 status = AE_ERROR;
335 return_VALUE(status);
336 }
337
338 hkey = buffer.pointer;
339 if (!hkey || (hkey->type != ACPI_TYPE_PACKAGE)) {
340 ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid HKEY.SINF\n"));
341 goto free_buffer;
342 }
343
344 if (hotkey->num_sifr < hkey->package.count) {
345 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
346 "SQTY reports bad SINF length\n"));
347 status = AE_ERROR;
348 goto free_buffer;
349 }
350
351 for (i = 0; i < hkey->package.count; i++) {
352 union acpi_object *element = &(hkey->package.elements[i]);
353 if (likely(element->type == ACPI_TYPE_INTEGER)) {
354 sinf[i] = element->integer.value;
355 } else {
356 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
357 "Invalid HKEY.SINF data\n"));
358 status = AE_ERROR;
359 break;
360 }
361 }
362 sinf[hkey->package.count] = -1;
363
364 free_buffer:
365 kfree(buffer.pointer);
366 return_VALUE(status == AE_OK ? AE_OK : AE_ERROR);
367}
368
369static int acpi_pcc_read_sinf_field(struct seq_file *seq, int field)
370{
371 struct acpi_hotkey *hotkey = (struct acpi_hotkey *) seq->private;
372 u32 sinf[hotkey->num_sifr + 1];
373
374 ACPI_FUNCTION_TRACE("acpi_pcc_read_sinf_field");
375
376 if (ACPI_SUCCESS(acpi_pcc_retrieve_biosdata(hotkey, sinf)))
377 seq_printf(seq, "%u\n", sinf[field]);
378 else
379 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
380 "Couldn't retrieve BIOS data\n"));
381
382 return_VALUE(AE_OK);
383}
384
/* -------------------------------------------------------------------------
   user interface functions
   ------------------------------------------------------------------------- */
/* read methods */
/* Sinf read methods */
/* Generate a seq_file show handler that prints one SINF field.
 * Maps ACPI success to 0 and failure to -EINVAL for the seq_file core. */
#define PCC_SINF_READ_F(_name_, FUNC) \
static int _name_(struct seq_file *seq, void *offset) \
{ \
	return_VALUE(ACPI_SUCCESS(acpi_pcc_read_sinf_field(seq, \
							   (FUNC))) \
			? 0 : -EINVAL); \
}

/* One show handler per read-only SINF field. */
PCC_SINF_READ_F(acpi_pcc_numbatteries_show, SINF_NUM_BATTERIES);
PCC_SINF_READ_F(acpi_pcc_lcdtype_show, SINF_LCD_TYPE);
PCC_SINF_READ_F(acpi_pcc_ac_brightness_max_show, SINF_AC_MAX_BRIGHT);
PCC_SINF_READ_F(acpi_pcc_ac_brightness_min_show, SINF_AC_MIN_BRIGHT);
PCC_SINF_READ_F(acpi_pcc_ac_brightness_show, SINF_AC_CUR_BRIGHT);
PCC_SINF_READ_F(acpi_pcc_dc_brightness_max_show, SINF_DC_MAX_BRIGHT);
PCC_SINF_READ_F(acpi_pcc_dc_brightness_min_show, SINF_DC_MIN_BRIGHT);
PCC_SINF_READ_F(acpi_pcc_dc_brightness_show, SINF_DC_CUR_BRIGHT);
/* "brightness" aliases the AC value; see acpi_pcc_write_no_brightness. */
PCC_SINF_READ_F(acpi_pcc_brightness_show, SINF_AC_CUR_BRIGHT);
PCC_SINF_READ_F(acpi_pcc_mute_show, SINF_MUTE);
408
409static int acpi_pcc_sticky_key_show(struct seq_file *seq, void *offset)
410{
411 struct acpi_hotkey *hotkey = seq->private;
412
413 ACPI_FUNCTION_TRACE("acpi_pcc_sticky_key_show");
414
415 if (!hotkey || !hotkey->device)
416 return_VALUE(-EINVAL);
417
418 seq_printf(seq, "%d\n", hotkey->sticky_mode);
419
420 return_VALUE(0);
421}
422
423static int acpi_pcc_keyinput_show(struct seq_file *seq, void *offset)
424{
425 struct acpi_hotkey *hotkey = seq->private;
426 struct input_dev *hotk_input_dev = hotkey->input_dev;
427 struct pcc_keyinput *keyinput = input_get_drvdata(hotk_input_dev);
428
429 ACPI_FUNCTION_TRACE("acpi_pcc_keyinput_show");
430
431 seq_printf(seq, "%d\n", keyinput->key_mode);
432
433 return_VALUE(0);
434}
435
436static int acpi_pcc_version_show(struct seq_file *seq, void *offset)
437{
438 struct acpi_hotkey *hotkey = seq->private;
439
440 ACPI_FUNCTION_TRACE("acpi_pcc_version_show");
441
442 if (!hotkey || !hotkey->device)
443 return_VALUE(-EINVAL);
444
445 seq_printf(seq, "%s version %s\n", ACPI_PCC_DRIVER_NAME,
446 ACPI_PCC_VERSION);
447 seq_printf(seq, "%li functions\n", hotkey->num_sifr);
448
449 return_VALUE(0);
450}
451
452/* write methods */
453static ssize_t acpi_pcc_write_single_flag(struct file *file,
454 const char __user *buffer,
455 size_t count,
456 int sinf_func)
457{
458 struct seq_file *seq = file->private_data;
459 struct acpi_hotkey *hotkey = seq->private;
460 char write_string[PROC_STR_MAX_LEN];
461 u32 val;
462
463 ACPI_FUNCTION_TRACE("acpi_pcc_write_single_flag");
464
465 if (!hotkey || (count > sizeof(write_string) - 1))
466 return_VALUE(-EINVAL);
467
468 if (copy_from_user(write_string, buffer, count))
469 return_VALUE(-EFAULT);
470
471 write_string[count] = '\0';
472
473 if ((sscanf(write_string, "%3i", &val) == 1) &&
474 (val == 0 || val == 1))
475 acpi_pcc_write_sset(hotkey, sinf_func, val);
476
477 return_VALUE(count);
478}
479
480static unsigned long acpi_pcc_write_brightness(struct file *file,
481 const char __user *buffer,
482 size_t count,
483 int min_index, int max_index,
484 int cur_index)
485{
486 struct seq_file *seq = (struct seq_file *)file->private_data;
487 struct acpi_hotkey *hotkey = (struct acpi_hotkey *)seq->private;
488 char write_string[PROC_STR_MAX_LEN];
489 u32 bright;
490 u32 sinf[hotkey->num_sifr + 1];
491
492 ACPI_FUNCTION_TRACE("acpi_pcc_write_brightness");
493
494 if (!hotkey || (count > sizeof(write_string) - 1))
495 return_VALUE(-EINVAL);
496
497 if (copy_from_user(write_string, buffer, count))
498 return_VALUE(-EFAULT);
499
500 write_string[count] = '\0';
501
502 if (ACPI_FAILURE(acpi_pcc_retrieve_biosdata(hotkey, sinf))) {
503 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
504 "Couldn't retrieve BIOS data\n"));
505 goto end;
506 }
507
508 if ((sscanf(write_string, "%4i", &bright) == 1) &&
509 (bright >= sinf[min_index]) &&
510 (bright <= sinf[max_index]))
511 acpi_pcc_write_sset(hotkey, cur_index, bright);
512
513end:
514 return_VALUE(count);
515}
516
517static ssize_t acpi_pcc_write_ac_brightness(struct file *file,
518 const char __user *buffer,
519 size_t count, loff_t *ppos)
520{
521 return_VALUE(acpi_pcc_write_brightness(file, buffer, count,
522 SINF_AC_MIN_BRIGHT,
523 SINF_AC_MAX_BRIGHT,
524 SINF_AC_CUR_BRIGHT));
525}
526
527static ssize_t acpi_pcc_write_dc_brightness(struct file *file,
528 const char __user *buffer,
529 size_t count, loff_t *ppos)
530{
531 return_VALUE(acpi_pcc_write_brightness(file, buffer, count,
532 SINF_DC_MIN_BRIGHT,
533 SINF_DC_MAX_BRIGHT,
534 SINF_DC_CUR_BRIGHT));
535}
536
/* /proc write handler for the plain "brightness" file; treats it as
 * the AC brightness (same indices as acpi_pcc_write_ac_brightness).
 * NOTE(review): uses a bare 'return' while the sibling wrappers use
 * return_VALUE(); likely an oversight, but return_VALUE's dependence
 * on ACPI trace macros makes the change non-trivial — confirm before
 * unifying. */
static ssize_t acpi_pcc_write_no_brightness(struct file *file,
					    const char __user *buffer,
					    size_t count, loff_t *ppos)
{
	return acpi_pcc_write_brightness(file, buffer, count,
					 SINF_AC_MIN_BRIGHT,
					 SINF_AC_MAX_BRIGHT,
					 SINF_AC_CUR_BRIGHT);
}
546
547static ssize_t acpi_pcc_write_mute(struct file *file,
548 const char __user *buffer,
549 size_t count, loff_t *ppos)
550{
551 return_VALUE(acpi_pcc_write_single_flag(file, buffer, count,
552 SINF_MUTE));
553}
554
555static ssize_t acpi_pcc_write_sticky_key(struct file *file,
556 const char __user *buffer,
557 size_t count, loff_t *ppos)
558{
559 struct seq_file *seq = (struct seq_file *)file->private_data;
560 struct acpi_hotkey *hotkey = (struct acpi_hotkey *)seq->private;
561 char write_string[PROC_STR_MAX_LEN];
562 int mode;
563
564 ACPI_FUNCTION_TRACE("acpi_pcc_write_sticky_key");
565
566 if (!hotkey || (count > sizeof(write_string) - 1))
567 return_VALUE(-EINVAL);
568
569 if (copy_from_user(write_string, buffer, count))
570 return_VALUE(-EFAULT);
571
572 write_string[count] = '\0';
573
574 if ((sscanf(write_string, "%3i", &mode) == 1) &&
575 (mode == 0 || mode == 1)) {
576 acpi_pcc_write_sset(hotkey, SINF_STICKY_KEY, mode);
577 hotkey->sticky_mode = mode;
578 }
579
580 return_VALUE(count);
581}
582
583static ssize_t acpi_pcc_write_keyinput(struct file *file,
584 const char __user *buffer,
585 size_t count, loff_t *ppos)
586{
587 struct seq_file *seq = (struct seq_file *)file->private_data;
588 struct acpi_hotkey *hotkey = (struct acpi_hotkey *)seq->private;
589 struct pcc_keyinput *keyinput;
590 char write_string[PROC_STR_MAX_LEN];
591 int key_mode;
592
593 ACPI_FUNCTION_TRACE("acpi_pcc_write_keyinput");
594
595 if (!hotkey || (count > (sizeof(write_string) - 1)))
596 return_VALUE(-EINVAL);
597
598 if (copy_from_user(write_string, buffer, count))
599 return_VALUE(-EFAULT);
600
601 write_string[count] = '\0';
602
603 if ((sscanf(write_string, "%4i", &key_mode) == 1) &&
604 (key_mode == 0 || key_mode == 1)) {
605 keyinput = input_get_drvdata(hotkey->input_dev);
606 keyinput->key_mode = key_mode;
607 }
608
609 return_VALUE(count);
610}
611
612/* -------------------------------------------------------------------------
613 hotkey driver
614 ------------------------------------------------------------------------- */
615static void acpi_pcc_generete_keyinput(struct acpi_hotkey *hotkey)
616{
617 struct input_dev *hotk_input_dev = hotkey->input_dev;
618 struct pcc_keyinput *keyinput = input_get_drvdata(hotk_input_dev);
619 int hinf = hotkey->status;
620 int key_code, hkey_num;
621 const int key_map[] = {
622 /* 0 */ -1,
623 /* 1 */ KEY_BRIGHTNESSDOWN,
624 /* 2 */ KEY_BRIGHTNESSUP,
625 /* 3 */ -1, /* vga/lcd switch event is not occur on
626 hotkey driver. */
627 /* 4 */ KEY_MUTE,
628 /* 5 */ KEY_VOLUMEDOWN,
629 /* 6 */ KEY_VOLUMEUP,
630 /* 7 */ KEY_SLEEP,
631 /* 8 */ -1, /* Change CPU boost: do nothing */
632 /* 9 */ KEY_BATT,
633 /* 10 */ KEY_SUSPEND,
634 };
635
636 ACPI_FUNCTION_TRACE("acpi_pcc_generete_keyinput");
637
638 if (keyinput->key_mode == 0)
639 return_VOID;
640
641 hkey_num = hinf & 0xf;
642
643 if ((0 > hkey_num) ||
644 (hkey_num > ARRAY_SIZE(key_map))) {
645 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
646 "hotkey number out of range: %d\n",
647 hkey_num));
648 return_VOID;
649 }
650
651 key_code = key_map[hkey_num];
652
653 if (key_code != -1) {
654 int pushed = (hinf & 0x80) ? TRUE : FALSE;
655
656 input_report_key(hotk_input_dev, key_code, pushed);
657 input_sync(hotk_input_dev);
658 }
659}
660
661static int acpi_pcc_hotkey_get_key(struct acpi_hotkey *hotkey)
662{
663 unsigned long result;
664 acpi_status status = AE_OK;
665
666 ACPI_FUNCTION_TRACE("acpi_pcc_hotkey_get_key");
667
668 status = acpi_evaluate_integer(hotkey->handle, METHOD_HKEY_QUERY,
669 NULL, &result);
670 if (likely(ACPI_SUCCESS(status)))
671 hotkey->status = result;
672 else
673 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
674 "error getting hotkey status\n"));
675
676 return_VALUE(status == AE_OK);
677}
678
679void acpi_pcc_hotkey_notify(acpi_handle handle, u32 event, void *data)
680{
681 struct acpi_hotkey *hotkey = (struct acpi_hotkey *) data;
682
683 ACPI_FUNCTION_TRACE("acpi_pcc_hotkey_notify");
684
685 switch (event) {
686 case HKEY_NOTIFY:
687 if (acpi_pcc_hotkey_get_key(hotkey)) {
688 /* generate event like '"pcc HKEY 00000080 00000084"'
689 * when Fn+F4 pressed */
690 acpi_bus_generate_proc_event(hotkey->device, event,
691 hotkey->status);
692 }
693 acpi_pcc_generete_keyinput(hotkey);
694 break;
695 default:
696 /* nothing to do */
697 break;
698 }
699 return_VOID;
700}
701
/* *************************************************************************
   FS Interface (/proc)
   ************************************************************************* */
/* open proc file fs: generate the seq_file open() wrapper for each
   show handler declared above. */
SEQ_OPEN_FS(acpi_pcc_dc_brightness_open_fs, acpi_pcc_dc_brightness_show);
SEQ_OPEN_FS(acpi_pcc_numbatteries_open_fs, acpi_pcc_numbatteries_show);
SEQ_OPEN_FS(acpi_pcc_lcdtype_open_fs, acpi_pcc_lcdtype_show);
SEQ_OPEN_FS(acpi_pcc_ac_brightness_max_open_fs,
	    acpi_pcc_ac_brightness_max_show);
SEQ_OPEN_FS(acpi_pcc_ac_brightness_min_open_fs,
	    acpi_pcc_ac_brightness_min_show);
SEQ_OPEN_FS(acpi_pcc_ac_brightness_open_fs, acpi_pcc_ac_brightness_show);
SEQ_OPEN_FS(acpi_pcc_dc_brightness_max_open_fs,
	    acpi_pcc_dc_brightness_max_show);
SEQ_OPEN_FS(acpi_pcc_dc_brightness_min_open_fs,
	    acpi_pcc_dc_brightness_min_show);
SEQ_OPEN_FS(acpi_pcc_brightness_open_fs, acpi_pcc_brightness_show);
SEQ_OPEN_FS(acpi_pcc_mute_open_fs, acpi_pcc_mute_show);
SEQ_OPEN_FS(acpi_pcc_version_open_fs, acpi_pcc_version_show);
SEQ_OPEN_FS(acpi_pcc_keyinput_open_fs, acpi_pcc_keyinput_show);
SEQ_OPEN_FS(acpi_pcc_sticky_key_open_fs, acpi_pcc_sticky_key_show);
723
/* file_operations for each /proc entry; SEQ_FILEOPS_R is read-only,
 * SEQ_FILEOPS_RW additionally wires the given write handler.
 * NOTE(review): these could be const, but struct proc_item stores a
 * non-const pointer — left as-is to match. */
static struct file_operations acpi_pcc_numbatteries_fops =
	SEQ_FILEOPS_R(acpi_pcc_numbatteries_open_fs);
static struct file_operations acpi_pcc_lcdtype_fops =
	SEQ_FILEOPS_R(acpi_pcc_lcdtype_open_fs);
static struct file_operations acpi_pcc_mute_fops =
	SEQ_FILEOPS_RW(acpi_pcc_mute_open_fs, acpi_pcc_write_mute);
static struct file_operations acpi_pcc_ac_brightness_fops =
	SEQ_FILEOPS_RW(acpi_pcc_ac_brightness_open_fs,
		       acpi_pcc_write_ac_brightness);
static struct file_operations acpi_pcc_ac_brightness_max_fops =
	SEQ_FILEOPS_R(acpi_pcc_ac_brightness_max_open_fs);
static struct file_operations acpi_pcc_ac_brightness_min_fops =
	SEQ_FILEOPS_R(acpi_pcc_ac_brightness_min_open_fs);
static struct file_operations acpi_pcc_dc_brightness_fops =
	SEQ_FILEOPS_RW(acpi_pcc_dc_brightness_open_fs,
		       acpi_pcc_write_dc_brightness);
static struct file_operations acpi_pcc_dc_brightness_max_fops =
	SEQ_FILEOPS_R(acpi_pcc_dc_brightness_max_open_fs);
static struct file_operations acpi_pcc_dc_brightness_min_fops =
	SEQ_FILEOPS_R(acpi_pcc_dc_brightness_min_open_fs);
static struct file_operations acpi_pcc_brightness_fops =
	SEQ_FILEOPS_RW(acpi_pcc_brightness_open_fs,
		       acpi_pcc_write_no_brightness);
static struct file_operations acpi_pcc_sticky_key_fops =
	SEQ_FILEOPS_RW(acpi_pcc_sticky_key_open_fs, acpi_pcc_write_sticky_key);
static struct file_operations acpi_pcc_keyinput_fops =
	SEQ_FILEOPS_RW(acpi_pcc_keyinput_open_fs, acpi_pcc_write_keyinput);
static struct file_operations acpi_pcc_version_fops =
	SEQ_FILEOPS_R(acpi_pcc_version_open_fs);
753
/* One /proc entry: name under the driver's proc dir, its fops, and
 * the entry's mode bits. */
struct proc_item {
	const char *name;
	struct file_operations *fops;
	mode_t flag;	/* permission/mode for create_proc_entry() */
};
759
/* Note: These functions map *exactly* to the SINF/SSET functions */
/* Entries backed by SINF fields; only the first num_sifr of these are
 * created, matching what the BIOS reports via SQTY.
 * NOTE(review): could be static — confirm no out-of-file users first. */
struct proc_item acpi_pcc_proc_items_sifr[] = {
	{ "num_batteries", &acpi_pcc_numbatteries_fops, S_IRUGO },
	{ "lcd_type", &acpi_pcc_lcdtype_fops, S_IRUGO },
	{ "ac_brightness_max", &acpi_pcc_ac_brightness_max_fops, S_IRUGO },
	{ "ac_brightness_min", &acpi_pcc_ac_brightness_min_fops, S_IRUGO },
	{ "ac_brightness", &acpi_pcc_ac_brightness_fops,
	  S_IFREG | S_IRUGO | S_IWUSR },
	{ "dc_brightness_max", &acpi_pcc_dc_brightness_max_fops, S_IRUGO },
	{ "dc_brightness_min", &acpi_pcc_dc_brightness_min_fops, S_IRUGO },
	{ "dc_brightness", &acpi_pcc_dc_brightness_fops,
	  S_IFREG | S_IRUGO | S_IWUSR },
	{ "brightness", &acpi_pcc_brightness_fops, S_IFREG | S_IRUGO | S_IWUSR},
	{ "mute", &acpi_pcc_mute_fops, S_IFREG | S_IRUGO | S_IWUSR },
	{ NULL, NULL, 0 },	/* table terminator */
};
776
/* Entries that exist regardless of the BIOS SINF field count. */
struct proc_item acpi_pcc_proc_items[] = {
	{ "sticky_key", &acpi_pcc_sticky_key_fops, S_IFREG | S_IRUGO | S_IWUSR},
	{ "keyinput", &acpi_pcc_keyinput_fops, S_IFREG | S_IRUGO | S_IWUSR },
	{ "version", &acpi_pcc_version_fops, S_IRUGO },
	{ NULL, NULL, 0 },	/* table terminator */
};
783
/*
 * Create up to 'num' /proc entries from the NULL-terminated 'proc_items'
 * table under hotkey->proc_dir_entry, storing the hotkey as each
 * entry's data.  On a creation failure all entries created so far are
 * removed again and -ENODEV is returned; 0 on success.
 */
static int __devinit acpi_pcc_add_device(struct acpi_device *device,
					 struct proc_item *proc_items,
					 int num)
{
	struct acpi_hotkey *hotkey = acpi_driver_data(device);
	struct proc_dir_entry *proc;
	struct proc_item *item;
	int i;

	for (item = proc_items, i = 0; item->name && i < num; ++item, ++i) {
		proc = create_proc_entry(item->name, item->flag,
					 hotkey->proc_dir_entry);
		if (likely(proc)) {
			proc->proc_fops = item->fops;
			proc->data = hotkey;
			proc->owner = THIS_MODULE;
		} else {
			/* roll back everything created so far */
			while (i-- > 0) {
				item--;
				remove_proc_entry(item->name,
						  hotkey->proc_dir_entry);
			}
			return_VALUE(-ENODEV);
		}
	}
	return_VALUE(0);
}
811
812static int __devinit acpi_pcc_proc_init(struct acpi_device *device)
813{
814 struct proc_dir_entry *acpi_pcc_dir;
815 struct acpi_hotkey *hotkey = acpi_driver_data(device);
816 acpi_status status;
817
818 ACPI_FUNCTION_TRACE("acpi_pcc_proc_init");
819
820 acpi_pcc_dir = proc_mkdir(PROC_PCC, acpi_root_dir);
821
822 if (unlikely(!acpi_pcc_dir)) {
823 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
824 "Couldn't create dir in /proc\n"));
825 return_VALUE(-ENODEV);
826 }
827
828 acpi_pcc_dir->owner = THIS_MODULE;
829 hotkey->proc_dir_entry = acpi_pcc_dir;
830
831 status = acpi_pcc_add_device(device, acpi_pcc_proc_items_sifr,
832 hotkey->num_sifr);
833 status |= acpi_pcc_add_device(device, acpi_pcc_proc_items,
834 ARRAY_SIZE(acpi_pcc_proc_items));
835 if (unlikely(status)) {
836 remove_proc_entry(PROC_PCC, acpi_root_dir);
837 hotkey->proc_dir_entry = NULL;
838 return_VALUE(-ENODEV);
839 }
840
841 return_VALUE(status);
842}
843
844static void __devexit acpi_pcc_remove_device(struct acpi_device *device,
845 struct proc_item *proc_items,
846 int num)
847{
848 struct acpi_hotkey *hotkey = acpi_driver_data(device);
849 struct proc_item *item;
850 int i;
851
852 for (item = proc_items, i = 0;
853 item->name != NULL && i < num;
854 ++item, ++i) {
855 remove_proc_entry(item->name, hotkey->proc_dir_entry);
856 }
857
858 return_VOID;
859}
860
861/* *************************************************************************
862 Power Management
863 ************************************************************************* */
864#ifdef CONFIG_PM
865static int acpi_pcc_hotkey_resume(struct acpi_device *device)
866{
867 struct acpi_hotkey *hotkey = acpi_driver_data(device);
868 acpi_status status = AE_OK;
869
870 ACPI_FUNCTION_TRACE("acpi_pcc_hotkey_resume");
871
872 if (device == NULL || hotkey == NULL)
873 return_VALUE(-EINVAL);
874
875 if (hotkey->num_sifr != 0) {
876 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Sticky mode restore: %d\n",
877 hotkey->sticky_mode));
878
879 status = acpi_pcc_write_sset(hotkey, SINF_STICKY_KEY,
880 hotkey->sticky_mode);
881 }
882 if (status != AE_OK)
883 return_VALUE(-EINVAL);
884
885 return_VALUE(0);
886}
887#endif
888
889/* *************************************************************************
890 Module init/remove
891 ************************************************************************* */
892/* -------------------------------------------------------------------------
893 input
894 ------------------------------------------------------------------------- */
895static int __devinit acpi_pcc_init_input(struct acpi_hotkey *hotkey)
896{
897 struct input_dev *hotk_input_dev;
898 struct pcc_keyinput *pcc_keyinput;
899 int error;
900
901 ACPI_FUNCTION_TRACE("acpi_pcc_init_input");
902
903 hotk_input_dev = input_allocate_device();
904 if (hotk_input_dev == NULL) {
905 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
906 "Couldn't allocate input device for hotkey"));
907 goto err_input;
908 }
909
910 pcc_keyinput = kcalloc(1, sizeof(struct pcc_keyinput), GFP_KERNEL);
911
912 if (pcc_keyinput == NULL) {
913 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
914 "Couldn't allocate mem for private data"));
915 goto err_pcc;
916 }
917
918 hotk_input_dev->evbit[0] = BIT(EV_KEY);
919
920 set_bit(KEY_BRIGHTNESSDOWN, hotk_input_dev->keybit);
921 set_bit(KEY_BRIGHTNESSUP, hotk_input_dev->keybit);
922 set_bit(KEY_MUTE, hotk_input_dev->keybit);
923 set_bit(KEY_VOLUMEDOWN, hotk_input_dev->keybit);
924 set_bit(KEY_VOLUMEUP, hotk_input_dev->keybit);
925 set_bit(KEY_SLEEP, hotk_input_dev->keybit);
926 set_bit(KEY_BATT, hotk_input_dev->keybit);
927 set_bit(KEY_SUSPEND, hotk_input_dev->keybit);
928
929 hotk_input_dev->name = ACPI_PCC_DRIVER_NAME;
930 hotk_input_dev->phys = ACPI_PCC_INPUT_PHYS;
931 hotk_input_dev->id.bustype = BUS_PCC_HOTKEY;
932 hotk_input_dev->id.vendor = 0x0001;
933 hotk_input_dev->id.product = 0x0001;
934 hotk_input_dev->id.version = 0x0100;
935
936 pcc_keyinput->key_mode = PCC_KEYINPUT_MODE;
937 pcc_keyinput->hotkey = hotkey;
938
939 input_set_drvdata(hotk_input_dev, pcc_keyinput);
940
941 hotkey->input_dev = hotk_input_dev;
942
943 error = input_register_device(hotk_input_dev);
944
945 if (error)
946 goto err_pcc;
947
948 return_VALUE(0);
949
950 err_pcc:
951 input_unregister_device(hotk_input_dev);
952 err_input:
953 return_VALUE(-ENOMEM);
954}
955
956static void __devexit acpi_pcc_remove_input(struct acpi_hotkey *hotkey)
957{
958 struct input_dev *hotk_input_dev;
959 struct pcc_keyinput *pcc_keyinput;
960
961 ACPI_FUNCTION_TRACE("acpi_pcc_remove_input");
962
963 if (hotkey == NULL) {
964 ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Can't free memory"));
965 return_VOID;
966 }
967
968 hotk_input_dev = hotkey->input_dev;
969 pcc_keyinput = input_get_drvdata(hotk_input_dev);
970
971 input_unregister_device(hotk_input_dev);
972
973 kfree(pcc_keyinput);
974}
975
976/* -------------------------------------------------------------------------
977 ACPI
978 ------------------------------------------------------------------------- */
979static int __devinit acpi_pcc_hotkey_add(struct acpi_device *device)
980{
981 acpi_status status = AE_OK;
982 struct acpi_hotkey *hotkey = NULL;
983 int sifr_status, num_sifr, result;
984
985 ACPI_FUNCTION_TRACE("acpi_pcc_hotkey_add");
986
987 if (device == NULL)
988 return_VALUE(-EINVAL);
989
990 sifr_status = acpi_pcc_get_sqty(device);
991
992 if (sifr_status > 255) {
993 ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "num_sifr too large"));
994 return_VALUE(-ENODEV);
995 }
996
997 if (sifr_status < 0) {
998 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "not support SQTY"));
999 num_sifr = 0;
1000 } else {
1001 num_sifr = sifr_status;
1002 }
1003
1004 hotkey = kcalloc(1, sizeof(struct acpi_hotkey), GFP_KERNEL);
1005 if (hotkey == NULL) {
1006 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
1007 "Couldn't allocate mem for hotkey"));
1008 return_VALUE(-ENOMEM);
1009 }
1010
1011 hotkey->device = device;
1012 hotkey->handle = device->handle;
1013 hotkey->num_sifr = num_sifr;
1014 acpi_driver_data(device) = hotkey;
1015 strcpy(acpi_device_name(device), ACPI_PCC_DEVICE_NAME);
1016 strcpy(acpi_device_class(device), ACPI_PCC_CLASS);
1017
1018 status = acpi_install_notify_handler(hotkey->handle,
1019 ACPI_DEVICE_NOTIFY,
1020 acpi_pcc_hotkey_notify,
1021 hotkey);
1022
1023 if (ACPI_FAILURE(status)) {
1024 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
1025 "Error installing notify handler\n"));
1026 kfree(hotkey);
1027 return_VALUE(-ENODEV);
1028 }
1029
1030 result = acpi_pcc_init_input(hotkey);
1031 if (result != 0) {
1032 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
1033 "Error installing keyinput handler\n"));
1034 kfree(hotkey);
1035 return_VALUE(result);
1036 }
1037
1038 return_VALUE(acpi_pcc_proc_init(device));
1039}
1040
1041static int __devexit acpi_pcc_hotkey_remove(struct acpi_device *device,
1042 int type)
1043{
1044 acpi_status status = AE_OK;
1045 struct acpi_hotkey *hotkey = acpi_driver_data(device);
1046
1047 ACPI_FUNCTION_TRACE("acpi_pcc_hotkey_remove");
1048
1049 if (!device || !hotkey)
1050 return_VALUE(-EINVAL);
1051
1052 if (hotkey->proc_dir_entry) {
1053 acpi_pcc_remove_device(device, acpi_pcc_proc_items_sifr,
1054 hotkey->num_sifr);
1055 acpi_pcc_remove_device(device, acpi_pcc_proc_items,
1056 ARRAY_SIZE(acpi_pcc_proc_items));
1057 remove_proc_entry(PROC_PCC, acpi_root_dir);
1058 }
1059
1060 status = acpi_remove_notify_handler(hotkey->handle,
1061 ACPI_DEVICE_NOTIFY, acpi_pcc_hotkey_notify);
1062
1063 if (ACPI_FAILURE(status)) {
1064 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
1065 "Error removing notify handler\n"));
1066 }
1067
1068 acpi_pcc_remove_input(hotkey);
1069 kfree(hotkey);
1070 return_VALUE(status == AE_OK);
1071}
1072
1073/* *********************************************************************
1074 Module entry point
1075 ********************************************************************* */
1076static int __init acpi_pcc_init(void)
1077{
1078 int result;
1079
1080 ACPI_FUNCTION_TRACE("acpi_pcc_init");
1081
1082 printk(KERN_INFO LOGPREFIX "loading...\n");
1083
1084 if (acpi_disabled) {
1085 printk(KERN_INFO LOGPREFIX "ACPI disabled.\n");
1086 return_VALUE(-ENODEV);
1087 }
1088
1089 result = acpi_bus_register_driver(&acpi_pcc_driver);
1090 if (result < 0) {
1091 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
1092 "Error registering hotkey driver\n"));
1093 return_VALUE(-ENODEV);
1094 }
1095
1096 return_VALUE(result);
1097}
1098
1099static void __exit acpi_pcc_exit(void)
1100{
1101 ACPI_FUNCTION_TRACE("acpi_pcc_exit");
1102
1103 printk(KERN_INFO LOGPREFIX "unloading...\n");
1104
1105 acpi_bus_unregister_driver(&acpi_pcc_driver);
1106
1107 return_VOID;
1108}
1109
/* Wire the module entry/exit points. */
module_init(acpi_pcc_init);
module_exit(acpi_pcc_exit);
diff --git a/drivers/staging/poch/Kconfig b/drivers/staging/poch/Kconfig
new file mode 100644
index 000000000000..b3b33b984a57
--- /dev/null
+++ b/drivers/staging/poch/Kconfig
@@ -0,0 +1,6 @@
1config POCH
2 tristate "Redrapids Pocket Change CardBus support"
3 depends on PCI && UIO
 4 default n
5 ---help---
6 Enable support for Redrapids Pocket Change CardBus devices.
diff --git a/drivers/staging/poch/Makefile b/drivers/staging/poch/Makefile
new file mode 100644
index 000000000000..d2b96805cb9e
--- /dev/null
+++ b/drivers/staging/poch/Makefile
@@ -0,0 +1 @@
obj-$(CONFIG_POCH) += poch.o
diff --git a/drivers/staging/poch/README b/drivers/staging/poch/README
new file mode 100644
index 000000000000..f65e979743ba
--- /dev/null
+++ b/drivers/staging/poch/README
@@ -0,0 +1,7 @@
1TODO:
2 - fix transmit overflows
3 - audit userspace interfaces
4 - get reserved major/minor if needed
5
6Please send patches to Greg Kroah-Hartman <greg@kroah.com> and
7Vijay Kumar <vijaykumar@bravegnu.org> and Jaya Kumar <jayakumar.lkml@gmail.com>
diff --git a/drivers/staging/poch/poch.c b/drivers/staging/poch/poch.c
new file mode 100644
index 000000000000..0e113f9a1581
--- /dev/null
+++ b/drivers/staging/poch/poch.c
@@ -0,0 +1,1425 @@
1/*
2 * User-space DMA and UIO based Redrapids Pocket Change CardBus driver
3 *
4 * Copyright 2008 Vijay Kumar <vijaykumar@bravegnu.org>
5 *
6 * Licensed under GPL version 2 only.
7 */
8
9#include <linux/device.h>
10#include <linux/module.h>
11#include <linux/pci.h>
12#include <linux/uio_driver.h>
13#include <linux/spinlock.h>
14#include <linux/cdev.h>
15#include <linux/delay.h>
16#include <linux/sysfs.h>
17#include <linux/poll.h>
18#include <linux/idr.h>
19#include <linux/interrupt.h>
20#include <linux/init.h>
21#include <linux/ioctl.h>
22#include <linux/io.h>
23
24#include "poch.h"
25
26#include <asm/cacheflush.h>
27
28#ifndef PCI_VENDOR_ID_RRAPIDS
29#define PCI_VENDOR_ID_RRAPIDS 0x17D2
30#endif
31
32#ifndef PCI_DEVICE_ID_RRAPIDS_POCKET_CHANGE
33#define PCI_DEVICE_ID_RRAPIDS_POCKET_CHANGE 0x0351
34#endif
35
36#define POCH_NCHANNELS 2
37
38#define MAX_POCH_CARDS 8
39#define MAX_POCH_DEVICES (MAX_POCH_CARDS * POCH_NCHANNELS)
40
41#define DRV_NAME "poch"
42#define PFX DRV_NAME ": "
43
44/*
45 * BAR0 Bridge Register Definitions
46 */
47
48#define BRIDGE_REV_REG 0x0
49#define BRIDGE_INT_MASK_REG 0x4
50#define BRIDGE_INT_STAT_REG 0x8
51
52#define BRIDGE_INT_ACTIVE (0x1 << 31)
53#define BRIDGE_INT_FPGA (0x1 << 2)
54#define BRIDGE_INT_TEMP_FAIL (0x1 << 1)
55#define BRIDGE_INT_TEMP_WARN (0x1 << 0)
56
57#define BRIDGE_FPGA_RESET_REG 0xC
58
59#define BRIDGE_CARD_POWER_REG 0x10
60#define BRIDGE_CARD_POWER_EN (0x1 << 0)
61#define BRIDGE_CARD_POWER_PROG_DONE (0x1 << 31)
62
63#define BRIDGE_JTAG_REG 0x14
64#define BRIDGE_DMA_GO_REG 0x18
65#define BRIDGE_STAT_0_REG 0x1C
66#define BRIDGE_STAT_1_REG 0x20
67#define BRIDGE_STAT_2_REG 0x24
68#define BRIDGE_STAT_3_REG 0x28
69#define BRIDGE_TEMP_STAT_REG 0x2C
70#define BRIDGE_TEMP_THRESH_REG 0x30
71#define BRIDGE_EEPROM_REVSEL_REG 0x34
72#define BRIDGE_CIS_STRUCT_REG 0x100
73#define BRIDGE_BOARDREV_REG 0x124
74
75/*
76 * BAR1 FPGA Register Definitions
77 */
78
79#define FPGA_IFACE_REV_REG 0x0
80#define FPGA_RX_BLOCK_SIZE_REG 0x8
81#define FPGA_TX_BLOCK_SIZE_REG 0xC
82#define FPGA_RX_BLOCK_COUNT_REG 0x10
83#define FPGA_TX_BLOCK_COUNT_REG 0x14
84#define FPGA_RX_CURR_DMA_BLOCK_REG 0x18
85#define FPGA_TX_CURR_DMA_BLOCK_REG 0x1C
86#define FPGA_RX_GROUP_COUNT_REG 0x20
87#define FPGA_TX_GROUP_COUNT_REG 0x24
88#define FPGA_RX_CURR_GROUP_REG 0x28
89#define FPGA_TX_CURR_GROUP_REG 0x2C
90#define FPGA_RX_CURR_PCI_REG 0x38
91#define FPGA_TX_CURR_PCI_REG 0x3C
92#define FPGA_RX_GROUP0_START_REG 0x40
93#define FPGA_TX_GROUP0_START_REG 0xC0
94#define FPGA_DMA_DESC_1_REG 0x140
95#define FPGA_DMA_DESC_2_REG 0x144
96#define FPGA_DMA_DESC_3_REG 0x148
97#define FPGA_DMA_DESC_4_REG 0x14C
98
99#define FPGA_DMA_INT_STAT_REG 0x150
100#define FPGA_DMA_INT_MASK_REG 0x154
101#define FPGA_DMA_INT_RX (1 << 0)
102#define FPGA_DMA_INT_TX (1 << 1)
103
104#define FPGA_RX_GROUPS_PER_INT_REG 0x158
105#define FPGA_TX_GROUPS_PER_INT_REG 0x15C
106#define FPGA_DMA_ADR_PAGE_REG 0x160
107#define FPGA_FPGA_REV_REG 0x200
108
109#define FPGA_ADC_CLOCK_CTL_REG 0x204
110#define FPGA_ADC_CLOCK_CTL_OSC_EN (0x1 << 3)
111#define FPGA_ADC_CLOCK_LOCAL_CLK (0x1 | FPGA_ADC_CLOCK_CTL_OSC_EN)
112#define FPGA_ADC_CLOCK_EXT_SAMP_CLK 0X0
113
114#define FPGA_ADC_DAC_EN_REG 0x208
115#define FPGA_ADC_DAC_EN_DAC_OFF (0x1 << 1)
116#define FPGA_ADC_DAC_EN_ADC_OFF (0x1 << 0)
117
118#define FPGA_INT_STAT_REG 0x20C
119#define FPGA_INT_MASK_REG 0x210
120#define FPGA_INT_PLL_UNLOCKED (0x1 << 9)
121#define FPGA_INT_DMA_CORE (0x1 << 8)
122#define FPGA_INT_TX_FF_EMPTY (0x1 << 7)
123#define FPGA_INT_RX_FF_EMPTY (0x1 << 6)
124#define FPGA_INT_TX_FF_OVRFLW (0x1 << 3)
125#define FPGA_INT_RX_FF_OVRFLW (0x1 << 2)
126#define FPGA_INT_TX_ACQ_DONE (0x1 << 1)
127#define FPGA_INT_RX_ACQ_DONE (0x1)
128
129#define FPGA_RX_ADC_CTL_REG 0x214
130#define FPGA_RX_ADC_CTL_CONT_CAP (0x0)
131#define FPGA_RX_ADC_CTL_SNAP_CAP (0x1)
132
133#define FPGA_RX_ARM_REG 0x21C
134
135#define FPGA_DOM_REG 0x224
136#define FPGA_DOM_DCM_RESET (0x1 << 5)
137#define FPGA_DOM_SOFT_RESET (0x1 << 4)
138#define FPGA_DOM_DUAL_M_SG_DMA (0x0)
139#define FPGA_DOM_TARGET_ACCESS (0x1)
140
141#define FPGA_TX_CTL_REG 0x228
142#define FPGA_TX_CTL_FIFO_FLUSH (0x1 << 9)
143#define FPGA_TX_CTL_OUTPUT_ZERO (0x0 << 2)
144#define FPGA_TX_CTL_OUTPUT_CARDBUS (0x1 << 2)
145#define FPGA_TX_CTL_OUTPUT_ADC (0x2 << 2)
146#define FPGA_TX_CTL_OUTPUT_SNAPSHOT (0x3 << 2)
147#define FPGA_TX_CTL_LOOPBACK (0x1 << 0)
148
149#define FPGA_ENDIAN_MODE_REG 0x22C
150#define FPGA_RX_FIFO_COUNT_REG 0x28C
151#define FPGA_TX_ENABLE_REG 0x298
152#define FPGA_TX_TRIGGER_REG 0x29C
153#define FPGA_TX_DATAMEM_COUNT_REG 0x2A8
154#define FPGA_CAP_FIFO_REG 0x300
155#define FPGA_TX_SNAPSHOT_REG 0x8000
156
157/*
158 * Channel Index Definitions
159 */
160
/* Channel indices within poch_dev->channels[]. */
enum {
	CHNO_RX_CHANNEL,
	CHNO_TX_CHANNEL,
};
165
struct poch_dev;	/* forward declaration; defined below */

/* Data-transfer direction of a channel. */
enum channel_dir {
	CHANNEL_DIR_RX,
	CHANNEL_DIR_TX,
};
172
/* Per-DMA-group bookkeeping: backing page, its bus address, and the
 * offset at which it appears in the user-space mmap. */
struct poch_group_info {
	struct page *pg;
	dma_addr_t dma_addr;
	unsigned long user_offset;
};
178
/* Per-channel (RX or TX) state; each card exposes POCH_NCHANNELS of
 * these. */
struct channel_info {
	unsigned int chno;	/* CHNO_RX_CHANNEL or CHNO_TX_CHANNEL */

	/* Values staged via sysfs (store_block_size etc.); the sys_
	 * prefix distinguishes them from the active values below. */
	atomic_t sys_block_size;
	atomic_t sys_group_size;
	atomic_t sys_group_count;

	enum channel_dir dir;

	/* Active DMA geometry for this channel. */
	unsigned long block_size;
	unsigned long group_size;
	unsigned long group_count;

	/* Contains the DMA address and VM offset of each group. */
	struct poch_group_info *groups;

	/* Contains the header and circular buffer exported to userspace. */
	spinlock_t group_offsets_lock;
	struct poch_cbuf_header *header;
	struct page *header_pg;
	unsigned long header_size;

	/* Last group indicated as 'complete' to user space. */
	unsigned int transfer;

	wait_queue_head_t wq;

	/* Shared counter; presumably data_available on RX and
	 * space_available on TX — confirm against the IRQ path. */
	union {
		unsigned int data_available;
		unsigned int space_available;
	};

	/* Mapped BAR0/BAR1 registers, shared with the parent poch_dev;
	 * iomem_lock points at the device-wide lock. */
	void __iomem *bridge_iomem;
	void __iomem *fpga_iomem;
	spinlock_t *iomem_lock;

	atomic_t free;
	atomic_t inited;

	/* Error counters */
	struct poch_counters counters;
	spinlock_t counters_lock;

	struct device *dev;
};
224
/* Per-card state: UIO registration, PCI handle, both channels, and
 * the character device through which they are exposed. */
struct poch_dev {
	struct uio_info uio;
	struct pci_dev *pci_dev;
	unsigned int nchannels;
	struct channel_info channels[POCH_NCHANNELS];
	struct cdev cdev;

	/* Counts the no. of channels that have been opened. On first
	 * open, the card is powered on. On last channel close, the
	 * card is powered off.
	 */
	atomic_t usage;

	/* Mapped BAR0 (bridge) and BAR1 (FPGA) register windows. */
	void __iomem *bridge_iomem;
	void __iomem *fpga_iomem;
	spinlock_t iomem_lock;

	struct device *dev;
};
244
/* Driver-wide state: base of the allocated char-dev region, the sysfs
 * class for channel devices, and an IDR handing out minor numbers. */
static dev_t poch_first_dev;
static struct class *poch_cls;
static DEFINE_IDR(poch_ids);
248
249static ssize_t store_block_size(struct device *dev,
250 struct device_attribute *attr,
251 const char *buf, size_t count)
252{
253 struct channel_info *channel = dev_get_drvdata(dev);
254 unsigned long block_size;
255
256 sscanf(buf, "%lu", &block_size);
257 atomic_set(&channel->sys_block_size, block_size);
258
259 return count;
260}
261static DEVICE_ATTR(block_size, S_IWUSR|S_IWGRP, NULL, store_block_size);
262
263static ssize_t store_group_size(struct device *dev,
264 struct device_attribute *attr,
265 const char *buf, size_t count)
266{
267 struct channel_info *channel = dev_get_drvdata(dev);
268 unsigned long group_size;
269
270 sscanf(buf, "%lu", &group_size);
271 atomic_set(&channel->sys_group_size, group_size);
272
273 return count;
274}
275static DEVICE_ATTR(group_size, S_IWUSR|S_IWGRP, NULL, store_group_size);
276
277static ssize_t store_group_count(struct device *dev,
278 struct device_attribute *attr,
279 const char *buf, size_t count)
280{
281 struct channel_info *channel = dev_get_drvdata(dev);
282 unsigned long group_count;
283
284 sscanf(buf, "%lu", &group_count);
285 atomic_set(&channel->sys_group_count, group_count);
286
287 return count;
288}
289static DEVICE_ATTR(group_count, S_IWUSR|S_IWGRP, NULL, store_group_count);
290
291static ssize_t show_direction(struct device *dev,
292 struct device_attribute *attr, char *buf)
293{
294 struct channel_info *channel = dev_get_drvdata(dev);
295 int len;
296
297 len = sprintf(buf, "%s\n", (channel->dir ? "tx" : "rx"));
298 return len;
299}
300static DEVICE_ATTR(dir, S_IRUSR|S_IRGRP, show_direction, NULL);
301
302static ssize_t show_mmap_size(struct device *dev,
303 struct device_attribute *attr, char *buf)
304{
305 struct channel_info *channel = dev_get_drvdata(dev);
306 int len;
307 unsigned long mmap_size;
308 unsigned long group_pages;
309 unsigned long header_pages;
310 unsigned long total_group_pages;
311
312 /* FIXME: We do not have to add 1, if group_size a multiple of
313 PAGE_SIZE. */
314 group_pages = (channel->group_size / PAGE_SIZE) + 1;
315 header_pages = (channel->header_size / PAGE_SIZE) + 1;
316 total_group_pages = group_pages * channel->group_count;
317
318 mmap_size = (header_pages + total_group_pages) * PAGE_SIZE;
319 len = sprintf(buf, "%lu\n", mmap_size);
320 return len;
321}
322static DEVICE_ATTR(mmap_size, S_IRUSR|S_IRGRP, show_mmap_size, NULL);
323
/* Per-channel sysfs attributes; created in poch_class_dev_register()
 * and removed in poch_class_dev_unregister(). */
static struct device_attribute *poch_class_attrs[] = {
	&dev_attr_block_size,
	&dev_attr_group_size,
	&dev_attr_group_count,
	&dev_attr_dir,
	&dev_attr_mmap_size,
};
331
332static void poch_channel_free_groups(struct channel_info *channel)
333{
334 unsigned long i;
335
336 for (i = 0; i < channel->group_count; i++) {
337 struct poch_group_info *group;
338 unsigned int order;
339
340 group = &channel->groups[i];
341 order = get_order(channel->group_size);
342 if (group->pg)
343 __free_pages(group->pg, order);
344 }
345}
346
347static int poch_channel_alloc_groups(struct channel_info *channel)
348{
349 unsigned long i;
350 unsigned long group_pages;
351 unsigned long header_pages;
352
353 group_pages = (channel->group_size / PAGE_SIZE) + 1;
354 header_pages = (channel->header_size / PAGE_SIZE) + 1;
355
356 for (i = 0; i < channel->group_count; i++) {
357 struct poch_group_info *group;
358 unsigned int order;
359 gfp_t gfp_mask;
360
361 group = &channel->groups[i];
362 order = get_order(channel->group_size);
363
364 /*
365 * __GFP_COMP is required here since we are going to
366 * perform non-linear mapping to userspace. For more
367 * information read the vm_insert_page() function
368 * comments.
369 */
370
371 gfp_mask = GFP_KERNEL | GFP_DMA32 | __GFP_ZERO;
372 group->pg = alloc_pages(gfp_mask, order);
373 if (!group->pg) {
374 poch_channel_free_groups(channel);
375 return -ENOMEM;
376 }
377
378 /* FIXME: This is the physical address not the bus
379 * address! This won't work in architectures that
380 * have an IOMMU. Can we use pci_map_single() for
381 * this?
382 */
383 group->dma_addr = page_to_pfn(group->pg) * PAGE_SIZE;
384 group->user_offset =
385 (header_pages + (i * group_pages)) * PAGE_SIZE;
386
387 printk(KERN_INFO PFX "%ld: user_offset: 0x%lx dma: 0x%x\n", i,
388 group->user_offset, group->dma_addr);
389 }
390
391 return 0;
392}
393
/*
 * Snapshot the sysfs-tunable attributes into the channel so they
 * stay fixed for the lifetime of this open; the sys_* atomics can
 * be changed at any time through the attribute store hooks.
 */
static void channel_latch_attr(struct channel_info *channel)
{
	channel->group_count = atomic_read(&channel->sys_group_count);
	channel->group_size = atomic_read(&channel->sys_group_size);
	channel->block_size = atomic_read(&channel->sys_block_size);
}
400
401/*
402 * Configure DMA group registers
403 */
404static void channel_dma_init(struct channel_info *channel)
405{
406 void __iomem *fpga = channel->fpga_iomem;
407 u32 group_regs_base;
408 u32 group_reg;
409 unsigned int page;
410 unsigned int group_in_page;
411 unsigned long i;
412 u32 block_size_reg;
413 u32 block_count_reg;
414 u32 group_count_reg;
415 u32 groups_per_int_reg;
416 u32 curr_pci_reg;
417
418 if (channel->chno == CHNO_RX_CHANNEL) {
419 group_regs_base = FPGA_RX_GROUP0_START_REG;
420 block_size_reg = FPGA_RX_BLOCK_SIZE_REG;
421 block_count_reg = FPGA_RX_BLOCK_COUNT_REG;
422 group_count_reg = FPGA_RX_GROUP_COUNT_REG;
423 groups_per_int_reg = FPGA_RX_GROUPS_PER_INT_REG;
424 curr_pci_reg = FPGA_RX_CURR_PCI_REG;
425 } else {
426 group_regs_base = FPGA_TX_GROUP0_START_REG;
427 block_size_reg = FPGA_TX_BLOCK_SIZE_REG;
428 block_count_reg = FPGA_TX_BLOCK_COUNT_REG;
429 group_count_reg = FPGA_TX_GROUP_COUNT_REG;
430 groups_per_int_reg = FPGA_TX_GROUPS_PER_INT_REG;
431 curr_pci_reg = FPGA_TX_CURR_PCI_REG;
432 }
433
434 printk(KERN_WARNING "block_size, group_size, group_count\n");
435 iowrite32(channel->block_size, fpga + block_size_reg);
436 iowrite32(channel->group_size / channel->block_size,
437 fpga + block_count_reg);
438 iowrite32(channel->group_count, fpga + group_count_reg);
439 /* FIXME: Hardcoded groups per int. Get it from sysfs? */
440 iowrite32(1, fpga + groups_per_int_reg);
441
442 /* Unlock PCI address? Not defined in the data sheet, but used
443 * in the reference code by Redrapids.
444 */
445 iowrite32(0x1, fpga + curr_pci_reg);
446
447 /* The DMA address page register is shared between the RX and
448 * TX channels, so acquire lock.
449 */
450 spin_lock(channel->iomem_lock);
451 for (i = 0; i < channel->group_count; i++) {
452 page = i / 32;
453 group_in_page = i % 32;
454
455 group_reg = group_regs_base + (group_in_page * 4);
456
457 iowrite32(page, fpga + FPGA_DMA_ADR_PAGE_REG);
458 iowrite32(channel->groups[i].dma_addr, fpga + group_reg);
459 }
460 for (i = 0; i < channel->group_count; i++) {
461 page = i / 32;
462 group_in_page = i % 32;
463
464 group_reg = group_regs_base + (group_in_page * 4);
465
466 iowrite32(page, fpga + FPGA_DMA_ADR_PAGE_REG);
467 printk(KERN_INFO PFX "%ld: read dma_addr: 0x%x\n", i,
468 ioread32(fpga + group_reg));
469 }
470 spin_unlock(channel->iomem_lock);
471
472}
473
/*
 * Allocate the header page(s) that are exported to userspace via
 * mmap(): a fixed poch_cbuf_header followed by one group_offsets[]
 * slot per group (flexible array member).
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int poch_channel_alloc_header(struct channel_info *channel)
{
	/* NOTE: channel->header is not yet valid here; the local is
	 * only used inside sizeof(), which does not evaluate its
	 * operand, so this is safe (if confusing). */
	struct poch_cbuf_header *header = channel->header;
	unsigned long group_offset_size;
	unsigned long tot_group_offsets_size;

	/* Allocate memory to hold header exported userspace */
	group_offset_size = sizeof(header->group_offsets[0]);
	tot_group_offsets_size = group_offset_size * channel->group_count;
	channel->header_size = sizeof(*header) + tot_group_offsets_size;
	channel->header_pg = alloc_pages(GFP_KERNEL | __GFP_ZERO,
					 get_order(channel->header_size));
	if (!channel->header_pg)
		return -ENOMEM;

	channel->header = page_address(channel->header_pg);

	return 0;
}
493
494static void poch_channel_free_header(struct channel_info *channel)
495{
496 unsigned int order;
497
498 order = get_order(channel->header_size);
499 __free_pages(channel->header_pg, order);
500}
501
502static void poch_channel_init_header(struct channel_info *channel)
503{
504 int i;
505 struct poch_group_info *groups;
506 s32 *group_offsets;
507
508 channel->header->group_size_bytes = channel->group_size;
509 channel->header->group_count = channel->group_count;
510
511 spin_lock_init(&channel->group_offsets_lock);
512
513 group_offsets = channel->header->group_offsets;
514 groups = channel->groups;
515
516 for (i = 0; i < channel->group_count; i++) {
517 if (channel->dir == CHANNEL_DIR_RX)
518 group_offsets[i] = -1;
519 else
520 group_offsets[i] = groups[i].user_offset;
521 }
522}
523
524static void __poch_channel_clear_counters(struct channel_info *channel)
525{
526 channel->counters.pll_unlock = 0;
527 channel->counters.fifo_empty = 0;
528 channel->counters.fifo_overflow = 0;
529}
530
531static int poch_channel_init(struct channel_info *channel,
532 struct poch_dev *poch_dev)
533{
534 struct pci_dev *pdev = poch_dev->pci_dev;
535 struct device *dev = &pdev->dev;
536 unsigned long alloc_size;
537 int ret;
538
539 printk(KERN_WARNING "channel_latch_attr\n");
540
541 channel_latch_attr(channel);
542
543 channel->transfer = 0;
544
545 /* Allocate memory to hold group information. */
546 alloc_size = channel->group_count * sizeof(struct poch_group_info);
547 channel->groups = kzalloc(alloc_size, GFP_KERNEL);
548 if (!channel->groups) {
549 dev_err(dev, "error allocating memory for group info\n");
550 ret = -ENOMEM;
551 goto out;
552 }
553
554 printk(KERN_WARNING "poch_channel_alloc_groups\n");
555
556 ret = poch_channel_alloc_groups(channel);
557 if (ret) {
558 dev_err(dev, "error allocating groups of order %d\n",
559 get_order(channel->group_size));
560 goto out_free_group_info;
561 }
562
563 ret = poch_channel_alloc_header(channel);
564 if (ret) {
565 dev_err(dev, "error allocating user space header\n");
566 goto out_free_groups;
567 }
568
569 channel->fpga_iomem = poch_dev->fpga_iomem;
570 channel->bridge_iomem = poch_dev->bridge_iomem;
571 channel->iomem_lock = &poch_dev->iomem_lock;
572 spin_lock_init(&channel->counters_lock);
573
574 __poch_channel_clear_counters(channel);
575
576 printk(KERN_WARNING "poch_channel_init_header\n");
577
578 poch_channel_init_header(channel);
579
580 return 0;
581
582 out_free_groups:
583 poch_channel_free_groups(channel);
584 out_free_group_info:
585 kfree(channel->groups);
586 out:
587 return ret;
588}
589
590static int poch_wait_fpga_prog(void __iomem *bridge)
591{
592 unsigned long total_wait;
593 const unsigned long wait_period = 100;
594 /* FIXME: Get the actual timeout */
595 const unsigned long prog_timeo = 10000; /* 10 Seconds */
596 u32 card_power;
597
598 printk(KERN_WARNING "poch_wait_fpg_prog\n");
599
600 printk(KERN_INFO PFX "programming fpga ...\n");
601 total_wait = 0;
602 while (1) {
603 msleep(wait_period);
604 total_wait += wait_period;
605
606 card_power = ioread32(bridge + BRIDGE_CARD_POWER_REG);
607 if (card_power & BRIDGE_CARD_POWER_PROG_DONE) {
608 printk(KERN_INFO PFX "programming done\n");
609 return 0;
610 }
611 if (total_wait > prog_timeo) {
612 printk(KERN_ERR PFX
613 "timed out while programming FPGA\n");
614 return -EIO;
615 }
616 }
617}
618
619static void poch_card_power_off(struct poch_dev *poch_dev)
620{
621 void __iomem *bridge = poch_dev->bridge_iomem;
622 u32 card_power;
623
624 iowrite32(0, bridge + BRIDGE_INT_MASK_REG);
625 iowrite32(0, bridge + BRIDGE_DMA_GO_REG);
626
627 card_power = ioread32(bridge + BRIDGE_CARD_POWER_REG);
628 iowrite32(card_power & ~BRIDGE_CARD_POWER_EN,
629 bridge + BRIDGE_CARD_POWER_REG);
630}
631
/* Source of the ADC sampling clock; see poch_card_clock_on(). */
enum clk_src {
	CLK_SRC_ON_BOARD,
	CLK_SRC_EXTERNAL
};
636
637static void poch_card_clock_on(void __iomem *fpga)
638{
639 /* FIXME: Get this data through sysfs? */
640 enum clk_src clk_src = CLK_SRC_ON_BOARD;
641
642 if (clk_src == CLK_SRC_ON_BOARD) {
643 iowrite32(FPGA_ADC_CLOCK_LOCAL_CLK | FPGA_ADC_CLOCK_CTL_OSC_EN,
644 fpga + FPGA_ADC_CLOCK_CTL_REG);
645 } else if (clk_src == CLK_SRC_EXTERNAL) {
646 iowrite32(FPGA_ADC_CLOCK_EXT_SAMP_CLK,
647 fpga + FPGA_ADC_CLOCK_CTL_REG);
648 }
649}
650
/*
 * Power-up sequence for the card: enable power, wait for the FPGA
 * to finish programming itself, turn the sampling clock on, then
 * reset the FPGA state machines and select dual-master SG DMA mode.
 *
 * Returns 0 on success, -EIO if FPGA programming times out (power
 * is switched back off in that case).
 */
static int poch_card_power_on(struct poch_dev *poch_dev)
{
	void __iomem *bridge = poch_dev->bridge_iomem;
	void __iomem *fpga = poch_dev->fpga_iomem;

	iowrite32(BRIDGE_CARD_POWER_EN, bridge + BRIDGE_CARD_POWER_REG);

	if (poch_wait_fpga_prog(bridge) != 0) {
		poch_card_power_off(poch_dev);
		return -EIO;
	}

	poch_card_clock_on(fpga);

	/* Sync to new clock, reset state machines, set DMA mode. */
	iowrite32(FPGA_DOM_DCM_RESET | FPGA_DOM_SOFT_RESET
		  | FPGA_DOM_DUAL_M_SG_DMA, fpga + FPGA_DOM_REG);

	/* FIXME: The time required for sync. needs to be tuned. */
	msleep(1000);

	return 0;
}
674
675static void poch_channel_analog_on(struct channel_info *channel)
676{
677 void __iomem *fpga = channel->fpga_iomem;
678 u32 adc_dac_en;
679
680 spin_lock(channel->iomem_lock);
681 adc_dac_en = ioread32(fpga + FPGA_ADC_DAC_EN_REG);
682 switch (channel->chno) {
683 case CHNO_RX_CHANNEL:
684 iowrite32(adc_dac_en & ~FPGA_ADC_DAC_EN_ADC_OFF,
685 fpga + FPGA_ADC_DAC_EN_REG);
686 break;
687 case CHNO_TX_CHANNEL:
688 iowrite32(adc_dac_en & ~FPGA_ADC_DAC_EN_DAC_OFF,
689 fpga + FPGA_ADC_DAC_EN_REG);
690 break;
691 }
692 spin_unlock(channel->iomem_lock);
693}
694
/*
 * Open a channel device node.  Exactly one opener per channel is
 * allowed (channel->free acts as a 1 -> 0 claim token).  The first
 * open of either channel powers the card on and sets up the DMA
 * controller and interrupt masks; subsequent opens reuse them.
 */
static int poch_open(struct inode *inode, struct file *filp)
{
	struct poch_dev *poch_dev;
	struct channel_info *channel;
	void __iomem *bridge;
	void __iomem *fpga;
	int chno;
	int usage;
	int ret;

	poch_dev = container_of(inode->i_cdev, struct poch_dev, cdev);
	bridge = poch_dev->bridge_iomem;
	fpga = poch_dev->fpga_iomem;

	/* Minor number modulo channel count selects RX or TX. */
	chno = iminor(inode) % poch_dev->nchannels;
	channel = &poch_dev->channels[chno];

	/* Claim the channel; undo the decrement if it was taken. */
	if (!atomic_dec_and_test(&channel->free)) {
		atomic_inc(&channel->free);
		ret = -EBUSY;
		goto out;
	}

	usage = atomic_inc_return(&poch_dev->usage);

	printk(KERN_WARNING "poch_card_power_on\n");

	/* First opener across both channels powers the card on. */
	if (usage == 1) {
		ret = poch_card_power_on(poch_dev);
		if (ret)
			goto out_dec_usage;
	}

	printk(KERN_INFO "CardBus Bridge Revision: %x\n",
	       ioread32(bridge + BRIDGE_REV_REG));
	printk(KERN_INFO "CardBus Interface Revision: %x\n",
	       ioread32(fpga + FPGA_IFACE_REV_REG));

	channel->chno = chno;
	filp->private_data = channel;

	printk(KERN_WARNING "poch_channel_init\n");

	ret = poch_channel_init(channel, poch_dev);
	if (ret)
		goto out_power_off;

	poch_channel_analog_on(channel);

	printk(KERN_WARNING "channel_dma_init\n");

	channel_dma_init(channel);

	printk(KERN_WARNING "poch_channel_analog_on\n");

	if (usage == 1) {
		printk(KERN_WARNING "setting up DMA\n");

		/* Initialize DMA Controller. */
		iowrite32(FPGA_CAP_FIFO_REG, bridge + BRIDGE_STAT_2_REG);
		iowrite32(FPGA_DMA_DESC_1_REG, bridge + BRIDGE_STAT_3_REG);

		/* Reads of the status registers; presumably they are
		 * clear-on-read — TODO confirm against the data sheet. */
		ioread32(fpga + FPGA_DMA_INT_STAT_REG);
		ioread32(fpga + FPGA_INT_STAT_REG);
		ioread32(bridge + BRIDGE_INT_STAT_REG);

		/* Initialize Interrupts. FIXME: Enable temperature
		 * handling We are enabling both Tx and Rx channel
		 * interrupts here. Do we need to enable interrupts
		 * only for the current channel? Anyways we won't get
		 * the interrupt unless the DMA is activated.
		 */
		iowrite32(BRIDGE_INT_FPGA, bridge + BRIDGE_INT_MASK_REG);
		iowrite32(FPGA_INT_DMA_CORE
			  | FPGA_INT_PLL_UNLOCKED
			  | FPGA_INT_TX_FF_EMPTY
			  | FPGA_INT_RX_FF_EMPTY
			  | FPGA_INT_TX_FF_OVRFLW
			  | FPGA_INT_RX_FF_OVRFLW,
			  fpga + FPGA_INT_MASK_REG);
		iowrite32(FPGA_DMA_INT_RX | FPGA_DMA_INT_TX,
			  fpga + FPGA_DMA_INT_MASK_REG);
	}

	if (channel->dir == CHANNEL_DIR_TX) {
		/* Flush TX FIFO and output data from cardbus. */
		iowrite32(FPGA_TX_CTL_FIFO_FLUSH
			  | FPGA_TX_CTL_OUTPUT_CARDBUS,
			  fpga + FPGA_TX_CTL_REG);
	}

	/* Mark the channel live so the IRQ path will service it. */
	atomic_inc(&channel->inited);

	return 0;

 out_power_off:
	if (usage == 1)
		poch_card_power_off(poch_dev);
 out_dec_usage:
	atomic_dec(&poch_dev->usage);
	atomic_inc(&channel->free);
 out:
	return ret;
}
799
/*
 * Release a channel: last closer across both channels powers the
 * card off, then this channel's buffers are freed and the channel
 * is marked free for the next opener.
 */
static int poch_release(struct inode *inode, struct file *filp)
{
	struct channel_info *channel = filp->private_data;
	struct poch_dev *poch_dev;
	int usage;

	poch_dev = container_of(inode->i_cdev, struct poch_dev, cdev);

	usage = atomic_dec_return(&poch_dev->usage);
	if (usage == 0) {
		printk(KERN_WARNING "poch_card_power_off\n");
		poch_card_power_off(poch_dev);
	}

	/* Stop IRQ servicing for this channel before freeing buffers.
	 * NOTE(review): the IRQ handler may still be running
	 * concurrently — confirm there is no use-after-free window. */
	atomic_dec(&channel->inited);
	poch_channel_free_header(channel);
	poch_channel_free_groups(channel);
	kfree(channel->groups);
	atomic_inc(&channel->free);

	return 0;
}
822
823/*
824 * Map the header and the group buffers, to user space.
825 */
826static int poch_mmap(struct file *filp, struct vm_area_struct *vma)
827{
828 struct channel_info *channel = filp->private_data;
829
830 unsigned long start;
831 unsigned long size;
832
833 unsigned long group_pages;
834 unsigned long header_pages;
835 unsigned long total_group_pages;
836
837 int pg_num;
838 struct page *pg;
839
840 int i;
841 int ret;
842
843 printk(KERN_WARNING "poch_mmap\n");
844
845 if (vma->vm_pgoff) {
846 printk(KERN_WARNING PFX "page offset: %lu\n", vma->vm_pgoff);
847 return -EINVAL;
848 }
849
850 group_pages = (channel->group_size / PAGE_SIZE) + 1;
851 header_pages = (channel->header_size / PAGE_SIZE) + 1;
852 total_group_pages = group_pages * channel->group_count;
853
854 size = vma->vm_end - vma->vm_start;
855 if (size != (header_pages + total_group_pages) * PAGE_SIZE) {
856 printk(KERN_WARNING PFX "required %lu bytes\n", size);
857 return -EINVAL;
858 }
859
860 start = vma->vm_start;
861
862 /* FIXME: Cleanup required on failure? */
863 pg = channel->header_pg;
864 for (pg_num = 0; pg_num < header_pages; pg_num++, pg++) {
865 printk(KERN_DEBUG PFX "page_count: %d\n", page_count(pg));
866 printk(KERN_DEBUG PFX "%d: header: 0x%lx\n", pg_num, start);
867 ret = vm_insert_page(vma, start, pg);
868 if (ret) {
869 printk(KERN_DEBUG "vm_insert 1 failed at %lx\n", start);
870 return ret;
871 }
872 start += PAGE_SIZE;
873 }
874
875 for (i = 0; i < channel->group_count; i++) {
876 pg = channel->groups[i].pg;
877 for (pg_num = 0; pg_num < group_pages; pg_num++, pg++) {
878 printk(KERN_DEBUG PFX "%d: group %d: 0x%lx\n",
879 pg_num, i, start);
880 ret = vm_insert_page(vma, start, pg);
881 if (ret) {
882 printk(KERN_DEBUG PFX
883 "vm_insert 2 failed at %d\n", pg_num);
884 return ret;
885 }
886 start += PAGE_SIZE;
887 }
888 }
889
890 return 0;
891}
892
893/*
894 * Check whether there is some group that the user space has not
895 * consumed yet. When the user space consumes a group, it sets it to
896 * -1. Cosuming could be reading data in case of RX and filling a
897 * buffer in case of TX.
898 */
899static int poch_channel_available(struct channel_info *channel)
900{
901 int i;
902
903 spin_lock_irq(&channel->group_offsets_lock);
904
905 for (i = 0; i < channel->group_count; i++) {
906 if (channel->dir == CHANNEL_DIR_RX
907 && channel->header->group_offsets[i] == -1) {
908 spin_unlock_irq(&channel->group_offsets_lock);
909 return 1;
910 }
911
912 if (channel->dir == CHANNEL_DIR_TX
913 && channel->header->group_offsets[i] != -1) {
914 spin_unlock_irq(&channel->group_offsets_lock);
915 return 1;
916 }
917 }
918
919 spin_unlock_irq(&channel->group_offsets_lock);
920
921 return 0;
922}
923
924static unsigned int poch_poll(struct file *filp, poll_table *pt)
925{
926 struct channel_info *channel = filp->private_data;
927 unsigned int ret = 0;
928
929 poll_wait(filp, &channel->wq, pt);
930
931 if (poch_channel_available(channel)) {
932 if (channel->dir == CHANNEL_DIR_RX)
933 ret = POLLIN | POLLRDNORM;
934 else
935 ret = POLLOUT | POLLWRNORM;
936 }
937
938 return ret;
939}
940
941static int poch_ioctl(struct inode *inode, struct file *filp,
942 unsigned int cmd, unsigned long arg)
943{
944 struct channel_info *channel = filp->private_data;
945 void __iomem *fpga = channel->fpga_iomem;
946 void __iomem *bridge = channel->bridge_iomem;
947 void __user *argp = (void __user *)arg;
948 struct vm_area_struct *vms;
949 struct poch_counters counters;
950 int ret;
951
952 switch (cmd) {
953 case POCH_IOC_TRANSFER_START:
954 switch (channel->chno) {
955 case CHNO_TX_CHANNEL:
956 printk(KERN_INFO PFX "ioctl: Tx start\n");
957 iowrite32(0x1, fpga + FPGA_TX_TRIGGER_REG);
958 iowrite32(0x1, fpga + FPGA_TX_ENABLE_REG);
959
960 /* FIXME: Does it make sense to do a DMA GO
961 * twice, once in Tx and once in Rx.
962 */
963 iowrite32(0x1, bridge + BRIDGE_DMA_GO_REG);
964 break;
965 case CHNO_RX_CHANNEL:
966 printk(KERN_INFO PFX "ioctl: Rx start\n");
967 iowrite32(0x1, fpga + FPGA_RX_ARM_REG);
968 iowrite32(0x1, bridge + BRIDGE_DMA_GO_REG);
969 break;
970 }
971 break;
972 case POCH_IOC_TRANSFER_STOP:
973 switch (channel->chno) {
974 case CHNO_TX_CHANNEL:
975 printk(KERN_INFO PFX "ioctl: Tx stop\n");
976 iowrite32(0x0, fpga + FPGA_TX_ENABLE_REG);
977 iowrite32(0x0, fpga + FPGA_TX_TRIGGER_REG);
978 iowrite32(0x0, bridge + BRIDGE_DMA_GO_REG);
979 break;
980 case CHNO_RX_CHANNEL:
981 printk(KERN_INFO PFX "ioctl: Rx stop\n");
982 iowrite32(0x0, fpga + FPGA_RX_ARM_REG);
983 iowrite32(0x0, bridge + BRIDGE_DMA_GO_REG);
984 break;
985 }
986 break;
987 case POCH_IOC_GET_COUNTERS:
988 if (access_ok(VERIFY_WRITE, argp, sizeof(struct poch_counters)))
989 return -EFAULT;
990
991 spin_lock_irq(&channel->counters_lock);
992 counters = channel->counters;
993 __poch_channel_clear_counters(channel);
994 spin_unlock_irq(&channel->counters_lock);
995
996 ret = copy_to_user(argp, &counters,
997 sizeof(struct poch_counters));
998 if (ret)
999 return ret;
1000
1001 break;
1002 case POCH_IOC_SYNC_GROUP_FOR_USER:
1003 case POCH_IOC_SYNC_GROUP_FOR_DEVICE:
1004 vms = find_vma(current->mm, arg);
1005 if (!vms)
1006 /* Address not mapped. */
1007 return -EINVAL;
1008 if (vms->vm_file != filp)
1009 /* Address mapped from different device/file. */
1010 return -EINVAL;
1011
1012 flush_cache_range(vms, arg, arg + channel->group_size);
1013 break;
1014 }
1015 return 0;
1016}
1017
/* Character-device operations for the per-channel device nodes. */
static struct file_operations poch_fops = {
	.owner = THIS_MODULE,
	.open = poch_open,
	.release = poch_release,
	.ioctl = poch_ioctl,
	.poll = poch_poll,
	.mmap = poch_mmap
};
1026
/*
 * DMA-completion handling for one channel, called from the IRQ
 * handler.  Reads the hardware's current-group counter, computes
 * how many groups completed since the last interrupt (handling
 * wrap-around of the circular buffer), publishes them to user
 * space via the header's offset slots, and wakes pollers.
 */
static void poch_irq_dma(struct channel_info *channel)
{
	u32 prev_transfer;
	u32 curr_transfer;
	long groups_done;
	unsigned long i, j;
	struct poch_group_info *groups;
	s32 *group_offsets;
	u32 curr_group_reg;

	/* Channel not opened/initialized; nothing to service. */
	if (!atomic_read(&channel->inited))
		return;

	prev_transfer = channel->transfer;

	if (channel->chno == CHNO_RX_CHANNEL)
		curr_group_reg = FPGA_RX_CURR_GROUP_REG;
	else
		curr_group_reg = FPGA_TX_CURR_GROUP_REG;

	curr_transfer = ioread32(channel->fpga_iomem + curr_group_reg);

	groups_done = curr_transfer - prev_transfer;
	/* Check wrap over, and handle it. */
	if (groups_done <= 0)
		groups_done += channel->group_count;

	group_offsets = channel->header->group_offsets;
	groups = channel->groups;

	spin_lock(&channel->group_offsets_lock);

	/* RX: mark completed groups as holding data (their mmap
	 * offset); TX: mark them free to refill.  NOTE(review):
	 * assignments here look inverted relative to
	 * poch_channel_init_header()'s convention — confirm which
	 * value means "ready for user space". */
	for (i = 0; i < groups_done; i++) {
		j = (prev_transfer + i) % channel->group_count;
		if (channel->dir == CHANNEL_DIR_RX)
			group_offsets[j] = -1;
		else
			group_offsets[j] = groups[j].user_offset;
	}

	spin_unlock(&channel->group_offsets_lock);

	channel->transfer = curr_transfer;

	wake_up_interruptible(&channel->wq);
}
1073
/*
 * Shared interrupt handler.  Reads the bridge, FPGA and DMA status
 * registers, dispatches DMA completions to the per-channel handler
 * and accumulates FIFO/PLL error counters, then re-enables the
 * bridge interrupt mask.
 */
static irqreturn_t poch_irq_handler(int irq, void *p)
{
	struct poch_dev *poch_dev = p;
	void __iomem *bridge = poch_dev->bridge_iomem;
	void __iomem *fpga = poch_dev->fpga_iomem;
	struct channel_info *channel_rx = &poch_dev->channels[CHNO_RX_CHANNEL];
	struct channel_info *channel_tx = &poch_dev->channels[CHNO_TX_CHANNEL];
	u32 bridge_stat;
	u32 fpga_stat;
	u32 dma_stat;

	bridge_stat = ioread32(bridge + BRIDGE_INT_STAT_REG);
	fpga_stat = ioread32(fpga + FPGA_INT_STAT_REG);
	dma_stat = ioread32(fpga + FPGA_DMA_INT_STAT_REG);

	/* Second read of each status register; presumably the
	 * registers are clear-on-read and this acknowledges the
	 * interrupt — TODO confirm against the data sheet. */
	ioread32(fpga + FPGA_DMA_INT_STAT_REG);
	ioread32(fpga + FPGA_INT_STAT_REG);
	ioread32(bridge + BRIDGE_INT_STAT_REG);

	if (bridge_stat & BRIDGE_INT_FPGA) {
		if (fpga_stat & FPGA_INT_DMA_CORE) {
			if (dma_stat & FPGA_DMA_INT_RX)
				poch_irq_dma(channel_rx);
			if (dma_stat & FPGA_DMA_INT_TX)
				poch_irq_dma(channel_tx);
		}
		/* PLL loss affects both channels equally. */
		if (fpga_stat & FPGA_INT_PLL_UNLOCKED) {
			channel_tx->counters.pll_unlock++;
			channel_rx->counters.pll_unlock++;
			if (printk_ratelimit())
				printk(KERN_WARNING PFX "PLL unlocked\n");
		}
		if (fpga_stat & FPGA_INT_TX_FF_EMPTY)
			channel_tx->counters.fifo_empty++;
		if (fpga_stat & FPGA_INT_TX_FF_OVRFLW)
			channel_tx->counters.fifo_overflow++;
		if (fpga_stat & FPGA_INT_RX_FF_EMPTY)
			channel_rx->counters.fifo_empty++;
		if (fpga_stat & FPGA_INT_RX_FF_OVRFLW)
			channel_rx->counters.fifo_overflow++;

		/*
		 * FIXME: These errors should be notified through the
		 * poll interface as POLLERR.
		 */

		/* Re-enable interrupts. */
		iowrite32(BRIDGE_INT_FPGA, bridge + BRIDGE_INT_MASK_REG);

		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}
1128
1129static void poch_class_dev_unregister(struct poch_dev *poch_dev, int id)
1130{
1131 int i, j;
1132 int nattrs;
1133 struct channel_info *channel;
1134 dev_t devno;
1135
1136 if (poch_dev->dev == NULL)
1137 return;
1138
1139 for (i = 0; i < poch_dev->nchannels; i++) {
1140 channel = &poch_dev->channels[i];
1141 devno = poch_first_dev + (id * poch_dev->nchannels) + i;
1142
1143 if (!channel->dev)
1144 continue;
1145
1146 nattrs = sizeof(poch_class_attrs)/sizeof(poch_class_attrs[0]);
1147 for (j = 0; j < nattrs; j++)
1148 device_remove_file(channel->dev, poch_class_attrs[j]);
1149
1150 device_unregister(channel->dev);
1151 }
1152
1153 device_unregister(poch_dev->dev);
1154}
1155
/*
 * Create the sysfs class hierarchy for one card: a parent
 * "poch%d" device, then a "ch%d" child device per channel with
 * the poch_class_attrs attribute files attached.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * created so far is removed via poch_class_dev_unregister().
 */
static int __devinit poch_class_dev_register(struct poch_dev *poch_dev,
					     int id)
{
	struct device *dev = &poch_dev->pci_dev->dev;
	int i, j;
	int nattrs;
	int ret;
	struct channel_info *channel;
	dev_t devno;

	/* Parent device carries no dev_t of its own (MKDEV(0, 0)). */
	poch_dev->dev = device_create(poch_cls, &poch_dev->pci_dev->dev,
				      MKDEV(0, 0), NULL, "poch%d", id);
	if (IS_ERR(poch_dev->dev)) {
		dev_err(dev, "error creating parent class device");
		ret = PTR_ERR(poch_dev->dev);
		poch_dev->dev = NULL;
		return ret;
	}

	for (i = 0; i < poch_dev->nchannels; i++) {
		channel = &poch_dev->channels[i];

		/* Minor numbers match the cdev range added in probe. */
		devno = poch_first_dev + (id * poch_dev->nchannels) + i;
		channel->dev = device_create(poch_cls, poch_dev->dev, devno,
					     NULL, "ch%d", i);
		if (IS_ERR(channel->dev)) {
			dev_err(dev, "error creating channel class device");
			ret = PTR_ERR(channel->dev);
			channel->dev = NULL;
			poch_class_dev_unregister(poch_dev, id);
			return ret;
		}

		/* Attribute hooks look the channel up via drvdata. */
		dev_set_drvdata(channel->dev, channel);
		nattrs = sizeof(poch_class_attrs)/sizeof(poch_class_attrs[0]);
		for (j = 0; j < nattrs; j++) {
			ret = device_create_file(channel->dev,
						 poch_class_attrs[j]);
			if (ret) {
				dev_err(dev, "error creating attribute file");
				poch_class_dev_unregister(poch_dev, id);
				return ret;
			}
		}
	}

	return 0;
}
1204
1205static int __devinit poch_pci_probe(struct pci_dev *pdev,
1206 const struct pci_device_id *pci_id)
1207{
1208 struct device *dev = &pdev->dev;
1209 struct poch_dev *poch_dev;
1210 struct uio_info *uio;
1211 int ret;
1212 int id;
1213 int i;
1214
1215 poch_dev = kzalloc(sizeof(struct poch_dev), GFP_KERNEL);
1216 if (!poch_dev) {
1217 dev_err(dev, "error allocating priv. data memory\n");
1218 return -ENOMEM;
1219 }
1220
1221 poch_dev->pci_dev = pdev;
1222 uio = &poch_dev->uio;
1223
1224 pci_set_drvdata(pdev, poch_dev);
1225
1226 spin_lock_init(&poch_dev->iomem_lock);
1227
1228 poch_dev->nchannels = POCH_NCHANNELS;
1229 poch_dev->channels[CHNO_RX_CHANNEL].dir = CHANNEL_DIR_RX;
1230 poch_dev->channels[CHNO_TX_CHANNEL].dir = CHANNEL_DIR_TX;
1231
1232 for (i = 0; i < poch_dev->nchannels; i++) {
1233 init_waitqueue_head(&poch_dev->channels[i].wq);
1234 atomic_set(&poch_dev->channels[i].free, 1);
1235 atomic_set(&poch_dev->channels[i].inited, 0);
1236 }
1237
1238 ret = pci_enable_device(pdev);
1239 if (ret) {
1240 dev_err(dev, "error enabling device\n");
1241 goto out_free;
1242 }
1243
1244 ret = pci_request_regions(pdev, "poch");
1245 if (ret) {
1246 dev_err(dev, "error requesting resources\n");
1247 goto out_disable;
1248 }
1249
1250 uio->mem[0].addr = pci_resource_start(pdev, 1);
1251 if (!uio->mem[0].addr) {
1252 dev_err(dev, "invalid BAR1\n");
1253 ret = -ENODEV;
1254 goto out_release;
1255 }
1256
1257 uio->mem[0].size = pci_resource_len(pdev, 1);
1258 uio->mem[0].memtype = UIO_MEM_PHYS;
1259
1260 uio->name = "poch";
1261 uio->version = "0.0.1";
1262 uio->irq = -1;
1263 ret = uio_register_device(dev, uio);
1264 if (ret) {
1265 dev_err(dev, "error register UIO device: %d\n", ret);
1266 goto out_release;
1267 }
1268
1269 poch_dev->bridge_iomem = ioremap(pci_resource_start(pdev, 0),
1270 pci_resource_len(pdev, 0));
1271 if (poch_dev->bridge_iomem == NULL) {
1272 dev_err(dev, "error mapping bridge (bar0) registers\n");
1273 ret = -ENOMEM;
1274 goto out_uio_unreg;
1275 }
1276
1277 poch_dev->fpga_iomem = ioremap(pci_resource_start(pdev, 1),
1278 pci_resource_len(pdev, 1));
1279 if (poch_dev->fpga_iomem == NULL) {
1280 dev_err(dev, "error mapping fpga (bar1) registers\n");
1281 ret = -ENOMEM;
1282 goto out_bar0_unmap;
1283 }
1284
1285 ret = request_irq(pdev->irq, poch_irq_handler, IRQF_SHARED,
1286 dev->bus_id, poch_dev);
1287 if (ret) {
1288 dev_err(dev, "error requesting IRQ %u\n", pdev->irq);
1289 ret = -ENOMEM;
1290 goto out_bar1_unmap;
1291 }
1292
1293 if (!idr_pre_get(&poch_ids, GFP_KERNEL)) {
1294 dev_err(dev, "error allocating memory ids\n");
1295 ret = -ENOMEM;
1296 goto out_free_irq;
1297 }
1298
1299 idr_get_new(&poch_ids, poch_dev, &id);
1300 if (id >= MAX_POCH_CARDS) {
1301 dev_err(dev, "minors exhausted\n");
1302 ret = -EBUSY;
1303 goto out_free_irq;
1304 }
1305
1306 cdev_init(&poch_dev->cdev, &poch_fops);
1307 poch_dev->cdev.owner = THIS_MODULE;
1308 ret = cdev_add(&poch_dev->cdev,
1309 poch_first_dev + (id * poch_dev->nchannels),
1310 poch_dev->nchannels);
1311 if (ret) {
1312 dev_err(dev, "error register character device\n");
1313 goto out_idr_remove;
1314 }
1315
1316 ret = poch_class_dev_register(poch_dev, id);
1317 if (ret)
1318 goto out_cdev_del;
1319
1320 return 0;
1321
1322 out_cdev_del:
1323 cdev_del(&poch_dev->cdev);
1324 out_idr_remove:
1325 idr_remove(&poch_ids, id);
1326 out_free_irq:
1327 free_irq(pdev->irq, poch_dev);
1328 out_bar1_unmap:
1329 iounmap(poch_dev->fpga_iomem);
1330 out_bar0_unmap:
1331 iounmap(poch_dev->bridge_iomem);
1332 out_uio_unreg:
1333 uio_unregister_device(uio);
1334 out_release:
1335 pci_release_regions(pdev);
1336 out_disable:
1337 pci_disable_device(pdev);
1338 out_free:
1339 kfree(poch_dev);
1340 return ret;
1341}
1342
1343/*
1344 * FIXME: We are yet to handle the hot unplug case.
1345 */
1346static void poch_pci_remove(struct pci_dev *pdev)
1347{
1348 struct poch_dev *poch_dev = pci_get_drvdata(pdev);
1349 struct uio_info *uio = &poch_dev->uio;
1350 unsigned int minor = MINOR(poch_dev->cdev.dev);
1351 unsigned int id = minor / poch_dev->nchannels;
1352
1353 /* FIXME: unmap fpga_iomem and bridge_iomem */
1354
1355 poch_class_dev_unregister(poch_dev, id);
1356 cdev_del(&poch_dev->cdev);
1357 idr_remove(&poch_ids, id);
1358 free_irq(pdev->irq, poch_dev);
1359 uio_unregister_device(uio);
1360 pci_release_regions(pdev);
1361 pci_disable_device(pdev);
1362 pci_set_drvdata(pdev, NULL);
1363 iounmap(uio->mem[0].internal_addr);
1364
1365 kfree(poch_dev);
1366}
1367
/* PCI IDs this driver binds to: the Redrapids Pocket Change card. */
static const struct pci_device_id poch_pci_ids[] /* __devinitconst */ = {
	{ PCI_DEVICE(PCI_VENDOR_ID_RRAPIDS,
		     PCI_DEVICE_ID_RRAPIDS_POCKET_CHANGE) },
	{ 0, }
};
1373
/* PCI driver glue; registered from poch_init_module(). */
static struct pci_driver poch_pci_driver = {
	.name = DRV_NAME,
	.id_table = poch_pci_ids,
	.probe = poch_pci_probe,
	.remove = poch_pci_remove,
};
1380
1381static int __init poch_init_module(void)
1382{
1383 int ret = 0;
1384
1385 ret = alloc_chrdev_region(&poch_first_dev, 0,
1386 MAX_POCH_DEVICES, DRV_NAME);
1387 if (ret) {
1388 printk(KERN_ERR PFX "error allocating device no.");
1389 return ret;
1390 }
1391
1392 poch_cls = class_create(THIS_MODULE, "pocketchange");
1393 if (IS_ERR(poch_cls)) {
1394 ret = PTR_ERR(poch_cls);
1395 goto out_unreg_chrdev;
1396 }
1397
1398 ret = pci_register_driver(&poch_pci_driver);
1399 if (ret) {
1400 printk(KERN_ERR PFX "error register PCI device");
1401 goto out_class_destroy;
1402 }
1403
1404 return 0;
1405
1406 out_class_destroy:
1407 class_destroy(poch_cls);
1408
1409 out_unreg_chrdev:
1410 unregister_chrdev_region(poch_first_dev, MAX_POCH_DEVICES);
1411
1412 return ret;
1413}
1414
/* Module exit: undo poch_init_module() in reverse order. */
static void __exit poch_exit_module(void)
{
	pci_unregister_driver(&poch_pci_driver);
	class_destroy(poch_cls);
	unregister_chrdev_region(poch_first_dev, MAX_POCH_DEVICES);
}
1421
/* Module entry/exit hooks. */
module_init(poch_init_module);
module_exit(poch_exit_module);

MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/poch/poch.h b/drivers/staging/poch/poch.h
new file mode 100644
index 000000000000..51a2d145798e
--- /dev/null
+++ b/drivers/staging/poch/poch.h
@@ -0,0 +1,29 @@
1/*
2 * User-space DMA and UIO based Redrapids Pocket Change CardBus driver
3 *
4 * Copyright 2008 Vijay Kumar <vijaykumar@bravegnu.org>
5 *
6 * Part of userspace API. Should be moved to a header file in
7 * include/linux for final version.
8 *
9 */
10struct poch_cbuf_header {
11 __s32 group_size_bytes;
12 __s32 group_count;
13 __s32 group_offsets[0];
14};
15
16struct poch_counters {
17 __u32 fifo_empty;
18 __u32 fifo_overflow;
19 __u32 pll_unlock;
20};
21
22#define POCH_IOC_NUM '9'
23
24#define POCH_IOC_TRANSFER_START _IO(POCH_IOC_NUM, 0)
25#define POCH_IOC_TRANSFER_STOP _IO(POCH_IOC_NUM, 1)
26#define POCH_IOC_GET_COUNTERS _IOR(POCH_IOC_NUM, 2, \
27 struct poch_counters)
28#define POCH_IOC_SYNC_GROUP_FOR_USER _IO(POCH_IOC_NUM, 3)
29#define POCH_IOC_SYNC_GROUP_FOR_DEVICE _IO(POCH_IOC_NUM, 4)
diff --git a/drivers/staging/slicoss/slicoss.c b/drivers/staging/slicoss/slicoss.c
index b61ac4b2db9e..8fa9490b3e2c 100644
--- a/drivers/staging/slicoss/slicoss.c
+++ b/drivers/staging/slicoss/slicoss.c
@@ -54,7 +54,6 @@
54 * IS-NIC driver. 54 * IS-NIC driver.
55 */ 55 */
56 56
57#include <linux/version.h>
58 57
59#define SLIC_DUMP_ENABLED 0 58#define SLIC_DUMP_ENABLED 0
60#define KLUDGE_FOR_4GB_BOUNDARY 1 59#define KLUDGE_FOR_4GB_BOUNDARY 1
@@ -96,17 +95,9 @@
96#include <linux/moduleparam.h> 95#include <linux/moduleparam.h>
97 96
98#include <linux/types.h> 97#include <linux/types.h>
99#include <linux/slab.h>
100#include <linux/delay.h>
101#include <linux/init.h>
102#include <linux/pci.h>
103#include <linux/dma-mapping.h> 98#include <linux/dma-mapping.h>
104#include <linux/netdevice.h>
105#include <linux/etherdevice.h>
106#include <linux/mii.h> 99#include <linux/mii.h>
107#include <linux/if_vlan.h> 100#include <linux/if_vlan.h>
108#include <linux/skbuff.h>
109#include <linux/string.h>
110#include <asm/unaligned.h> 101#include <asm/unaligned.h>
111 102
112#include <linux/ethtool.h> 103#include <linux/ethtool.h>
@@ -275,7 +266,6 @@ static void slic_dbg_register_trace(struct adapter *adapter,
275 card->reg_value[i], card->reg_valueh[i]); 266 card->reg_value[i], card->reg_valueh[i]);
276 } 267 }
277} 268}
278}
279#endif 269#endif
280 270
281static void slic_init_adapter(struct net_device *netdev, 271static void slic_init_adapter(struct net_device *netdev,
@@ -606,6 +596,7 @@ static void __devexit slic_entry_remove(struct pci_dev *pcidev)
606 uint mmio_len = 0; 596 uint mmio_len = 0;
607 struct adapter *adapter = (struct adapter *) netdev_priv(dev); 597 struct adapter *adapter = (struct adapter *) netdev_priv(dev);
608 struct sliccard *card; 598 struct sliccard *card;
599 struct mcast_address *mcaddr, *mlist;
609 600
610 ASSERT(adapter); 601 ASSERT(adapter);
611 DBG_MSG("slicoss: %s ENTER dev[%p] adapter[%p]\n", __func__, dev, 602 DBG_MSG("slicoss: %s ENTER dev[%p] adapter[%p]\n", __func__, dev,
@@ -625,6 +616,13 @@ static void __devexit slic_entry_remove(struct pci_dev *pcidev)
625 DBG_MSG("slicoss: %s iounmap dev->base_addr[%x]\n", __func__, 616 DBG_MSG("slicoss: %s iounmap dev->base_addr[%x]\n", __func__,
626 (uint) dev->base_addr); 617 (uint) dev->base_addr);
627 iounmap((void __iomem *)dev->base_addr); 618 iounmap((void __iomem *)dev->base_addr);
619 /* free multicast addresses */
620 mlist = adapter->mcastaddrs;
621 while (mlist) {
622 mcaddr = mlist;
623 mlist = mlist->next;
624 kfree(mcaddr);
625 }
628 ASSERT(adapter->card); 626 ASSERT(adapter->card);
629 card = adapter->card; 627 card = adapter->card;
630 ASSERT(card->adapters_allocated); 628 ASSERT(card->adapters_allocated);
diff --git a/drivers/staging/sxg/README b/drivers/staging/sxg/README
index 4d1ddbe4c335..d514d1848803 100644
--- a/drivers/staging/sxg/README
+++ b/drivers/staging/sxg/README
@@ -7,6 +7,7 @@ TODO:
7 - remove wrappers 7 - remove wrappers
8 - checkpatch.pl cleanups 8 - checkpatch.pl cleanups
9 - new functionality that the card needs 9 - new functionality that the card needs
10 - remove reliance on x86
10 11
11Please send patches to: 12Please send patches to:
12 Greg Kroah-Hartman <gregkh@suse.de> 13 Greg Kroah-Hartman <gregkh@suse.de>
diff --git a/drivers/staging/sxg/sxg.c b/drivers/staging/sxg/sxg.c
index 6ccbee875ab3..5272a18e2043 100644
--- a/drivers/staging/sxg/sxg.c
+++ b/drivers/staging/sxg/sxg.c
@@ -112,12 +112,16 @@ static bool sxg_mac_filter(p_adapter_t adapter,
112static struct net_device_stats *sxg_get_stats(p_net_device dev); 112static struct net_device_stats *sxg_get_stats(p_net_device dev);
113#endif 113#endif
114 114
115#define XXXTODO 0
116
117#if XXXTODO
115static int sxg_mac_set_address(p_net_device dev, void *ptr); 118static int sxg_mac_set_address(p_net_device dev, void *ptr);
119static void sxg_mcast_set_list(p_net_device dev);
120#endif
116 121
117static void sxg_adapter_set_hwaddr(p_adapter_t adapter); 122static void sxg_adapter_set_hwaddr(p_adapter_t adapter);
118 123
119static void sxg_unmap_mmio_space(p_adapter_t adapter); 124static void sxg_unmap_mmio_space(p_adapter_t adapter);
120static void sxg_mcast_set_mask(p_adapter_t adapter);
121 125
122static int sxg_initialize_adapter(p_adapter_t adapter); 126static int sxg_initialize_adapter(p_adapter_t adapter);
123static void sxg_stock_rcv_buffers(p_adapter_t adapter); 127static void sxg_stock_rcv_buffers(p_adapter_t adapter);
@@ -132,9 +136,6 @@ static int sxg_write_mdio_reg(p_adapter_t adapter,
132 u32 DevAddr, u32 RegAddr, u32 Value); 136 u32 DevAddr, u32 RegAddr, u32 Value);
133static int sxg_read_mdio_reg(p_adapter_t adapter, 137static int sxg_read_mdio_reg(p_adapter_t adapter,
134 u32 DevAddr, u32 RegAddr, u32 *pValue); 138 u32 DevAddr, u32 RegAddr, u32 *pValue);
135static void sxg_mcast_set_list(p_net_device dev);
136
137#define XXXTODO 0
138 139
139static unsigned int sxg_first_init = 1; 140static unsigned int sxg_first_init = 1;
140static char *sxg_banner = 141static char *sxg_banner =
@@ -202,7 +203,7 @@ static void sxg_init_driver(void)
202{ 203{
203 if (sxg_first_init) { 204 if (sxg_first_init) {
204 DBG_ERROR("sxg: %s sxg_first_init set jiffies[%lx]\n", 205 DBG_ERROR("sxg: %s sxg_first_init set jiffies[%lx]\n",
205 __FUNCTION__, jiffies); 206 __func__, jiffies);
206 sxg_first_init = 0; 207 sxg_first_init = 0;
207 spin_lock_init(&sxg_global.driver_lock); 208 spin_lock_init(&sxg_global.driver_lock);
208 } 209 }
@@ -223,7 +224,7 @@ static void sxg_dbg_macaddrs(p_adapter_t adapter)
223 return; 224 return;
224} 225}
225 226
226// SXG Globals 227/* SXG Globals */
227static SXG_DRIVER SxgDriver; 228static SXG_DRIVER SxgDriver;
228 229
229#ifdef ATKDBG 230#ifdef ATKDBG
@@ -250,7 +251,7 @@ static bool sxg_download_microcode(p_adapter_t adapter, SXG_UCODE_SEL UcodeSel)
250 u32 ThisSectionSize; 251 u32 ThisSectionSize;
251 u32 *Instruction = NULL; 252 u32 *Instruction = NULL;
252 u32 BaseAddress, AddressOffset, Address; 253 u32 BaseAddress, AddressOffset, Address;
253// u32 Failure; 254/* u32 Failure; */
254 u32 ValueRead; 255 u32 ValueRead;
255 u32 i; 256 u32 i;
256 u32 numSections = 0; 257 u32 numSections = 0;
@@ -259,10 +260,10 @@ static bool sxg_download_microcode(p_adapter_t adapter, SXG_UCODE_SEL UcodeSel)
259 260
260 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DnldUcod", 261 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DnldUcod",
261 adapter, 0, 0, 0); 262 adapter, 0, 0, 0);
262 DBG_ERROR("sxg: %s ENTER\n", __FUNCTION__); 263 DBG_ERROR("sxg: %s ENTER\n", __func__);
263 264
264 switch (UcodeSel) { 265 switch (UcodeSel) {
265 case SXG_UCODE_SAHARA: // Sahara operational ucode 266 case SXG_UCODE_SAHARA: /* Sahara operational ucode */
266 numSections = SNumSections; 267 numSections = SNumSections;
267 for (i = 0; i < numSections; i++) { 268 for (i = 0; i < numSections; i++) {
268 sectionSize[i] = SSectionSize[i]; 269 sectionSize[i] = SSectionSize[i];
@@ -276,13 +277,13 @@ static bool sxg_download_microcode(p_adapter_t adapter, SXG_UCODE_SEL UcodeSel)
276 } 277 }
277 278
278 DBG_ERROR("sxg: RESET THE CARD\n"); 279 DBG_ERROR("sxg: RESET THE CARD\n");
279 // First, reset the card 280 /* First, reset the card */
280 WRITE_REG(HwRegs->Reset, 0xDEAD, FLUSH); 281 WRITE_REG(HwRegs->Reset, 0xDEAD, FLUSH);
281 282
282 // Download each section of the microcode as specified in 283 /* Download each section of the microcode as specified in */
283 // its download file. The *download.c file is generated using 284 /* its download file. The *download.c file is generated using */
284 // the saharaobjtoc facility which converts the metastep .obj 285 /* the saharaobjtoc facility which converts the metastep .obj */
285 // file to a .c file which contains a two dimentional array. 286 /* file to a .c file which contains a two dimentional array. */
286 for (Section = 0; Section < numSections; Section++) { 287 for (Section = 0; Section < numSections; Section++) {
287 DBG_ERROR("sxg: SECTION # %d\n", Section); 288 DBG_ERROR("sxg: SECTION # %d\n", Section);
288 switch (UcodeSel) { 289 switch (UcodeSel) {
@@ -294,35 +295,35 @@ static bool sxg_download_microcode(p_adapter_t adapter, SXG_UCODE_SEL UcodeSel)
294 break; 295 break;
295 } 296 }
296 BaseAddress = sectionStart[Section]; 297 BaseAddress = sectionStart[Section];
297 ThisSectionSize = sectionSize[Section] / 12; // Size in instructions 298 ThisSectionSize = sectionSize[Section] / 12; /* Size in instructions */
298 for (AddressOffset = 0; AddressOffset < ThisSectionSize; 299 for (AddressOffset = 0; AddressOffset < ThisSectionSize;
299 AddressOffset++) { 300 AddressOffset++) {
300 Address = BaseAddress + AddressOffset; 301 Address = BaseAddress + AddressOffset;
301 ASSERT((Address & ~MICROCODE_ADDRESS_MASK) == 0); 302 ASSERT((Address & ~MICROCODE_ADDRESS_MASK) == 0);
302 // Write instruction bits 31 - 0 303 /* Write instruction bits 31 - 0 */
303 WRITE_REG(HwRegs->UcodeDataLow, *Instruction, FLUSH); 304 WRITE_REG(HwRegs->UcodeDataLow, *Instruction, FLUSH);
304 // Write instruction bits 63-32 305 /* Write instruction bits 63-32 */
305 WRITE_REG(HwRegs->UcodeDataMiddle, *(Instruction + 1), 306 WRITE_REG(HwRegs->UcodeDataMiddle, *(Instruction + 1),
306 FLUSH); 307 FLUSH);
307 // Write instruction bits 95-64 308 /* Write instruction bits 95-64 */
308 WRITE_REG(HwRegs->UcodeDataHigh, *(Instruction + 2), 309 WRITE_REG(HwRegs->UcodeDataHigh, *(Instruction + 2),
309 FLUSH); 310 FLUSH);
310 // Write instruction address with the WRITE bit set 311 /* Write instruction address with the WRITE bit set */
311 WRITE_REG(HwRegs->UcodeAddr, 312 WRITE_REG(HwRegs->UcodeAddr,
312 (Address | MICROCODE_ADDRESS_WRITE), FLUSH); 313 (Address | MICROCODE_ADDRESS_WRITE), FLUSH);
313 // Sahara bug in the ucode download logic - the write to DataLow 314 /* Sahara bug in the ucode download logic - the write to DataLow */
314 // for the next instruction could get corrupted. To avoid this, 315 /* for the next instruction could get corrupted. To avoid this, */
315 // write to DataLow again for this instruction (which may get 316 /* write to DataLow again for this instruction (which may get */
316 // corrupted, but it doesn't matter), then increment the address 317 /* corrupted, but it doesn't matter), then increment the address */
317 // and write the data for the next instruction to DataLow. That 318 /* and write the data for the next instruction to DataLow. That */
318 // write should succeed. 319 /* write should succeed. */
319 WRITE_REG(HwRegs->UcodeDataLow, *Instruction, TRUE); 320 WRITE_REG(HwRegs->UcodeDataLow, *Instruction, TRUE);
320 // Advance 3 u32S to start of next instruction 321 /* Advance 3 u32S to start of next instruction */
321 Instruction += 3; 322 Instruction += 3;
322 } 323 }
323 } 324 }
324 // Now repeat the entire operation reading the instruction back and 325 /* Now repeat the entire operation reading the instruction back and */
325 // checking for parity errors 326 /* checking for parity errors */
326 for (Section = 0; Section < numSections; Section++) { 327 for (Section = 0; Section < numSections; Section++) {
327 DBG_ERROR("sxg: check SECTION # %d\n", Section); 328 DBG_ERROR("sxg: check SECTION # %d\n", Section);
328 switch (UcodeSel) { 329 switch (UcodeSel) {
@@ -334,74 +335,74 @@ static bool sxg_download_microcode(p_adapter_t adapter, SXG_UCODE_SEL UcodeSel)
334 break; 335 break;
335 } 336 }
336 BaseAddress = sectionStart[Section]; 337 BaseAddress = sectionStart[Section];
337 ThisSectionSize = sectionSize[Section] / 12; // Size in instructions 338 ThisSectionSize = sectionSize[Section] / 12; /* Size in instructions */
338 for (AddressOffset = 0; AddressOffset < ThisSectionSize; 339 for (AddressOffset = 0; AddressOffset < ThisSectionSize;
339 AddressOffset++) { 340 AddressOffset++) {
340 Address = BaseAddress + AddressOffset; 341 Address = BaseAddress + AddressOffset;
341 // Write the address with the READ bit set 342 /* Write the address with the READ bit set */
342 WRITE_REG(HwRegs->UcodeAddr, 343 WRITE_REG(HwRegs->UcodeAddr,
343 (Address | MICROCODE_ADDRESS_READ), FLUSH); 344 (Address | MICROCODE_ADDRESS_READ), FLUSH);
344 // Read it back and check parity bit. 345 /* Read it back and check parity bit. */
345 READ_REG(HwRegs->UcodeAddr, ValueRead); 346 READ_REG(HwRegs->UcodeAddr, ValueRead);
346 if (ValueRead & MICROCODE_ADDRESS_PARITY) { 347 if (ValueRead & MICROCODE_ADDRESS_PARITY) {
347 DBG_ERROR("sxg: %s PARITY ERROR\n", 348 DBG_ERROR("sxg: %s PARITY ERROR\n",
348 __FUNCTION__); 349 __func__);
349 350
350 return (FALSE); // Parity error 351 return (FALSE); /* Parity error */
351 } 352 }
352 ASSERT((ValueRead & MICROCODE_ADDRESS_MASK) == Address); 353 ASSERT((ValueRead & MICROCODE_ADDRESS_MASK) == Address);
353 // Read the instruction back and compare 354 /* Read the instruction back and compare */
354 READ_REG(HwRegs->UcodeDataLow, ValueRead); 355 READ_REG(HwRegs->UcodeDataLow, ValueRead);
355 if (ValueRead != *Instruction) { 356 if (ValueRead != *Instruction) {
356 DBG_ERROR("sxg: %s MISCOMPARE LOW\n", 357 DBG_ERROR("sxg: %s MISCOMPARE LOW\n",
357 __FUNCTION__); 358 __func__);
358 return (FALSE); // Miscompare 359 return (FALSE); /* Miscompare */
359 } 360 }
360 READ_REG(HwRegs->UcodeDataMiddle, ValueRead); 361 READ_REG(HwRegs->UcodeDataMiddle, ValueRead);
361 if (ValueRead != *(Instruction + 1)) { 362 if (ValueRead != *(Instruction + 1)) {
362 DBG_ERROR("sxg: %s MISCOMPARE MIDDLE\n", 363 DBG_ERROR("sxg: %s MISCOMPARE MIDDLE\n",
363 __FUNCTION__); 364 __func__);
364 return (FALSE); // Miscompare 365 return (FALSE); /* Miscompare */
365 } 366 }
366 READ_REG(HwRegs->UcodeDataHigh, ValueRead); 367 READ_REG(HwRegs->UcodeDataHigh, ValueRead);
367 if (ValueRead != *(Instruction + 2)) { 368 if (ValueRead != *(Instruction + 2)) {
368 DBG_ERROR("sxg: %s MISCOMPARE HIGH\n", 369 DBG_ERROR("sxg: %s MISCOMPARE HIGH\n",
369 __FUNCTION__); 370 __func__);
370 return (FALSE); // Miscompare 371 return (FALSE); /* Miscompare */
371 } 372 }
372 // Advance 3 u32S to start of next instruction 373 /* Advance 3 u32S to start of next instruction */
373 Instruction += 3; 374 Instruction += 3;
374 } 375 }
375 } 376 }
376 377
377 // Everything OK, Go. 378 /* Everything OK, Go. */
378 WRITE_REG(HwRegs->UcodeAddr, MICROCODE_ADDRESS_GO, FLUSH); 379 WRITE_REG(HwRegs->UcodeAddr, MICROCODE_ADDRESS_GO, FLUSH);
379 380
380 // Poll the CardUp register to wait for microcode to initialize 381 /* Poll the CardUp register to wait for microcode to initialize */
381 // Give up after 10,000 attemps (500ms). 382 /* Give up after 10,000 attemps (500ms). */
382 for (i = 0; i < 10000; i++) { 383 for (i = 0; i < 10000; i++) {
383 udelay(50); 384 udelay(50);
384 READ_REG(adapter->UcodeRegs[0].CardUp, ValueRead); 385 READ_REG(adapter->UcodeRegs[0].CardUp, ValueRead);
385 if (ValueRead == 0xCAFE) { 386 if (ValueRead == 0xCAFE) {
386 DBG_ERROR("sxg: %s BOO YA 0xCAFE\n", __FUNCTION__); 387 DBG_ERROR("sxg: %s BOO YA 0xCAFE\n", __func__);
387 break; 388 break;
388 } 389 }
389 } 390 }
390 if (i == 10000) { 391 if (i == 10000) {
391 DBG_ERROR("sxg: %s TIMEOUT\n", __FUNCTION__); 392 DBG_ERROR("sxg: %s TIMEOUT\n", __func__);
392 393
393 return (FALSE); // Timeout 394 return (FALSE); /* Timeout */
394 } 395 }
395 // Now write the LoadSync register. This is used to 396 /* Now write the LoadSync register. This is used to */
396 // synchronize with the card so it can scribble on the memory 397 /* synchronize with the card so it can scribble on the memory */
397 // that contained 0xCAFE from the "CardUp" step above 398 /* that contained 0xCAFE from the "CardUp" step above */
398 if (UcodeSel == SXG_UCODE_SAHARA) { 399 if (UcodeSel == SXG_UCODE_SAHARA) {
399 WRITE_REG(adapter->UcodeRegs[0].LoadSync, 0, FLUSH); 400 WRITE_REG(adapter->UcodeRegs[0].LoadSync, 0, FLUSH);
400 } 401 }
401 402
402 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDnldUcd", 403 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDnldUcd",
403 adapter, 0, 0, 0); 404 adapter, 0, 0, 0);
404 DBG_ERROR("sxg: %s EXIT\n", __FUNCTION__); 405 DBG_ERROR("sxg: %s EXIT\n", __func__);
405 406
406 return (TRUE); 407 return (TRUE);
407} 408}
@@ -420,29 +421,29 @@ static int sxg_allocate_resources(p_adapter_t adapter)
420 int status; 421 int status;
421 u32 i; 422 u32 i;
422 u32 RssIds, IsrCount; 423 u32 RssIds, IsrCount;
423// PSXG_XMT_RING XmtRing; 424/* PSXG_XMT_RING XmtRing; */
424// PSXG_RCV_RING RcvRing; 425/* PSXG_RCV_RING RcvRing; */
425 426
426 DBG_ERROR("%s ENTER\n", __FUNCTION__); 427 DBG_ERROR("%s ENTER\n", __func__);
427 428
428 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocRes", 429 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocRes",
429 adapter, 0, 0, 0); 430 adapter, 0, 0, 0);
430 431
431 // Windows tells us how many CPUs it plans to use for 432 /* Windows tells us how many CPUs it plans to use for */
432 // RSS 433 /* RSS */
433 RssIds = SXG_RSS_CPU_COUNT(adapter); 434 RssIds = SXG_RSS_CPU_COUNT(adapter);
434 IsrCount = adapter->MsiEnabled ? RssIds : 1; 435 IsrCount = adapter->MsiEnabled ? RssIds : 1;
435 436
436 DBG_ERROR("%s Setup the spinlocks\n", __FUNCTION__); 437 DBG_ERROR("%s Setup the spinlocks\n", __func__);
437 438
438 // Allocate spinlocks and initialize listheads first. 439 /* Allocate spinlocks and initialize listheads first. */
439 spin_lock_init(&adapter->RcvQLock); 440 spin_lock_init(&adapter->RcvQLock);
440 spin_lock_init(&adapter->SglQLock); 441 spin_lock_init(&adapter->SglQLock);
441 spin_lock_init(&adapter->XmtZeroLock); 442 spin_lock_init(&adapter->XmtZeroLock);
442 spin_lock_init(&adapter->Bit64RegLock); 443 spin_lock_init(&adapter->Bit64RegLock);
443 spin_lock_init(&adapter->AdapterLock); 444 spin_lock_init(&adapter->AdapterLock);
444 445
445 DBG_ERROR("%s Setup the lists\n", __FUNCTION__); 446 DBG_ERROR("%s Setup the lists\n", __func__);
446 447
447 InitializeListHead(&adapter->FreeRcvBuffers); 448 InitializeListHead(&adapter->FreeRcvBuffers);
448 InitializeListHead(&adapter->FreeRcvBlocks); 449 InitializeListHead(&adapter->FreeRcvBlocks);
@@ -450,39 +451,39 @@ static int sxg_allocate_resources(p_adapter_t adapter)
450 InitializeListHead(&adapter->FreeSglBuffers); 451 InitializeListHead(&adapter->FreeSglBuffers);
451 InitializeListHead(&adapter->AllSglBuffers); 452 InitializeListHead(&adapter->AllSglBuffers);
452 453
453 // Mark these basic allocations done. This flags essentially 454 /* Mark these basic allocations done. This flags essentially */
454 // tells the SxgFreeResources routine that it can grab spinlocks 455 /* tells the SxgFreeResources routine that it can grab spinlocks */
455 // and reference listheads. 456 /* and reference listheads. */
456 adapter->BasicAllocations = TRUE; 457 adapter->BasicAllocations = TRUE;
457 // Main allocation loop. Start with the maximum supported by 458 /* Main allocation loop. Start with the maximum supported by */
458 // the microcode and back off if memory allocation 459 /* the microcode and back off if memory allocation */
459 // fails. If we hit a minimum, fail. 460 /* fails. If we hit a minimum, fail. */
460 461
461 for (;;) { 462 for (;;) {
462 DBG_ERROR("%s Allocate XmtRings size[%lx]\n", __FUNCTION__, 463 DBG_ERROR("%s Allocate XmtRings size[%x]\n", __func__,
463 (sizeof(SXG_XMT_RING) * 1)); 464 (unsigned int)(sizeof(SXG_XMT_RING) * 1));
464 465
465 // Start with big items first - receive and transmit rings. At the moment 466 /* Start with big items first - receive and transmit rings. At the moment */
466 // I'm going to keep the ring size fixed and adjust the number of 467 /* I'm going to keep the ring size fixed and adjust the number of */
467 // TCBs if we fail. Later we might consider reducing the ring size as well.. 468 /* TCBs if we fail. Later we might consider reducing the ring size as well.. */
468 adapter->XmtRings = pci_alloc_consistent(adapter->pcidev, 469 adapter->XmtRings = pci_alloc_consistent(adapter->pcidev,
469 sizeof(SXG_XMT_RING) * 470 sizeof(SXG_XMT_RING) *
470 1, 471 1,
471 &adapter->PXmtRings); 472 &adapter->PXmtRings);
472 DBG_ERROR("%s XmtRings[%p]\n", __FUNCTION__, adapter->XmtRings); 473 DBG_ERROR("%s XmtRings[%p]\n", __func__, adapter->XmtRings);
473 474
474 if (!adapter->XmtRings) { 475 if (!adapter->XmtRings) {
475 goto per_tcb_allocation_failed; 476 goto per_tcb_allocation_failed;
476 } 477 }
477 memset(adapter->XmtRings, 0, sizeof(SXG_XMT_RING) * 1); 478 memset(adapter->XmtRings, 0, sizeof(SXG_XMT_RING) * 1);
478 479
479 DBG_ERROR("%s Allocate RcvRings size[%lx]\n", __FUNCTION__, 480 DBG_ERROR("%s Allocate RcvRings size[%x]\n", __func__,
480 (sizeof(SXG_RCV_RING) * 1)); 481 (unsigned int)(sizeof(SXG_RCV_RING) * 1));
481 adapter->RcvRings = 482 adapter->RcvRings =
482 pci_alloc_consistent(adapter->pcidev, 483 pci_alloc_consistent(adapter->pcidev,
483 sizeof(SXG_RCV_RING) * 1, 484 sizeof(SXG_RCV_RING) * 1,
484 &adapter->PRcvRings); 485 &adapter->PRcvRings);
485 DBG_ERROR("%s RcvRings[%p]\n", __FUNCTION__, adapter->RcvRings); 486 DBG_ERROR("%s RcvRings[%p]\n", __func__, adapter->RcvRings);
486 if (!adapter->RcvRings) { 487 if (!adapter->RcvRings) {
487 goto per_tcb_allocation_failed; 488 goto per_tcb_allocation_failed;
488 } 489 }
@@ -490,7 +491,7 @@ static int sxg_allocate_resources(p_adapter_t adapter)
490 break; 491 break;
491 492
492 per_tcb_allocation_failed: 493 per_tcb_allocation_failed:
493 // an allocation failed. Free any successful allocations. 494 /* an allocation failed. Free any successful allocations. */
494 if (adapter->XmtRings) { 495 if (adapter->XmtRings) {
495 pci_free_consistent(adapter->pcidev, 496 pci_free_consistent(adapter->pcidev,
496 sizeof(SXG_XMT_RING) * 4096, 497 sizeof(SXG_XMT_RING) * 4096,
@@ -505,22 +506,22 @@ static int sxg_allocate_resources(p_adapter_t adapter)
505 adapter->PRcvRings); 506 adapter->PRcvRings);
506 adapter->RcvRings = NULL; 507 adapter->RcvRings = NULL;
507 } 508 }
508 // Loop around and try again.... 509 /* Loop around and try again.... */
509 } 510 }
510 511
511 DBG_ERROR("%s Initialize RCV ZERO and XMT ZERO rings\n", __FUNCTION__); 512 DBG_ERROR("%s Initialize RCV ZERO and XMT ZERO rings\n", __func__);
512 // Initialize rcv zero and xmt zero rings 513 /* Initialize rcv zero and xmt zero rings */
513 SXG_INITIALIZE_RING(adapter->RcvRingZeroInfo, SXG_RCV_RING_SIZE); 514 SXG_INITIALIZE_RING(adapter->RcvRingZeroInfo, SXG_RCV_RING_SIZE);
514 SXG_INITIALIZE_RING(adapter->XmtRingZeroInfo, SXG_XMT_RING_SIZE); 515 SXG_INITIALIZE_RING(adapter->XmtRingZeroInfo, SXG_XMT_RING_SIZE);
515 516
516 // Sanity check receive data structure format 517 /* Sanity check receive data structure format */
517 ASSERT((adapter->ReceiveBufferSize == SXG_RCV_DATA_BUFFER_SIZE) || 518 ASSERT((adapter->ReceiveBufferSize == SXG_RCV_DATA_BUFFER_SIZE) ||
518 (adapter->ReceiveBufferSize == SXG_RCV_JUMBO_BUFFER_SIZE)); 519 (adapter->ReceiveBufferSize == SXG_RCV_JUMBO_BUFFER_SIZE));
519 ASSERT(sizeof(SXG_RCV_DESCRIPTOR_BLOCK) == 520 ASSERT(sizeof(SXG_RCV_DESCRIPTOR_BLOCK) ==
520 SXG_RCV_DESCRIPTOR_BLOCK_SIZE); 521 SXG_RCV_DESCRIPTOR_BLOCK_SIZE);
521 522
522 // Allocate receive data buffers. We allocate a block of buffers and 523 /* Allocate receive data buffers. We allocate a block of buffers and */
523 // a corresponding descriptor block at once. See sxghw.h:SXG_RCV_BLOCK 524 /* a corresponding descriptor block at once. See sxghw.h:SXG_RCV_BLOCK */
524 for (i = 0; i < SXG_INITIAL_RCV_DATA_BUFFERS; 525 for (i = 0; i < SXG_INITIAL_RCV_DATA_BUFFERS;
525 i += SXG_RCV_DESCRIPTORS_PER_BLOCK) { 526 i += SXG_RCV_DESCRIPTORS_PER_BLOCK) {
526 sxg_allocate_buffer_memory(adapter, 527 sxg_allocate_buffer_memory(adapter,
@@ -528,8 +529,8 @@ static int sxg_allocate_resources(p_adapter_t adapter)
528 ReceiveBufferSize), 529 ReceiveBufferSize),
529 SXG_BUFFER_TYPE_RCV); 530 SXG_BUFFER_TYPE_RCV);
530 } 531 }
531 // NBL resource allocation can fail in the 'AllocateComplete' routine, which 532 /* NBL resource allocation can fail in the 'AllocateComplete' routine, which */
532 // doesn't return status. Make sure we got the number of buffers we requested 533 /* doesn't return status. Make sure we got the number of buffers we requested */
533 if (adapter->FreeRcvBufferCount < SXG_INITIAL_RCV_DATA_BUFFERS) { 534 if (adapter->FreeRcvBufferCount < SXG_INITIAL_RCV_DATA_BUFFERS) {
534 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF6", 535 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF6",
535 adapter, adapter->FreeRcvBufferCount, SXG_MAX_ENTRIES, 536 adapter, adapter->FreeRcvBufferCount, SXG_MAX_ENTRIES,
@@ -537,17 +538,17 @@ static int sxg_allocate_resources(p_adapter_t adapter)
537 return (STATUS_RESOURCES); 538 return (STATUS_RESOURCES);
538 } 539 }
539 540
540 DBG_ERROR("%s Allocate EventRings size[%lx]\n", __FUNCTION__, 541 DBG_ERROR("%s Allocate EventRings size[%x]\n", __func__,
541 (sizeof(SXG_EVENT_RING) * RssIds)); 542 (unsigned int)(sizeof(SXG_EVENT_RING) * RssIds));
542 543
543 // Allocate event queues. 544 /* Allocate event queues. */
544 adapter->EventRings = pci_alloc_consistent(adapter->pcidev, 545 adapter->EventRings = pci_alloc_consistent(adapter->pcidev,
545 sizeof(SXG_EVENT_RING) * 546 sizeof(SXG_EVENT_RING) *
546 RssIds, 547 RssIds,
547 &adapter->PEventRings); 548 &adapter->PEventRings);
548 549
549 if (!adapter->EventRings) { 550 if (!adapter->EventRings) {
550 // Caller will call SxgFreeAdapter to clean up above allocations 551 /* Caller will call SxgFreeAdapter to clean up above allocations */
551 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF8", 552 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF8",
552 adapter, SXG_MAX_ENTRIES, 0, 0); 553 adapter, SXG_MAX_ENTRIES, 0, 0);
553 status = STATUS_RESOURCES; 554 status = STATUS_RESOURCES;
@@ -555,12 +556,12 @@ static int sxg_allocate_resources(p_adapter_t adapter)
555 } 556 }
556 memset(adapter->EventRings, 0, sizeof(SXG_EVENT_RING) * RssIds); 557 memset(adapter->EventRings, 0, sizeof(SXG_EVENT_RING) * RssIds);
557 558
558 DBG_ERROR("%s Allocate ISR size[%x]\n", __FUNCTION__, IsrCount); 559 DBG_ERROR("%s Allocate ISR size[%x]\n", __func__, IsrCount);
559 // Allocate ISR 560 /* Allocate ISR */
560 adapter->Isr = pci_alloc_consistent(adapter->pcidev, 561 adapter->Isr = pci_alloc_consistent(adapter->pcidev,
561 IsrCount, &adapter->PIsr); 562 IsrCount, &adapter->PIsr);
562 if (!adapter->Isr) { 563 if (!adapter->Isr) {
563 // Caller will call SxgFreeAdapter to clean up above allocations 564 /* Caller will call SxgFreeAdapter to clean up above allocations */
564 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF9", 565 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF9",
565 adapter, SXG_MAX_ENTRIES, 0, 0); 566 adapter, SXG_MAX_ENTRIES, 0, 0);
566 status = STATUS_RESOURCES; 567 status = STATUS_RESOURCES;
@@ -568,10 +569,10 @@ static int sxg_allocate_resources(p_adapter_t adapter)
568 } 569 }
569 memset(adapter->Isr, 0, sizeof(u32) * IsrCount); 570 memset(adapter->Isr, 0, sizeof(u32) * IsrCount);
570 571
571 DBG_ERROR("%s Allocate shared XMT ring zero index location size[%lx]\n", 572 DBG_ERROR("%s Allocate shared XMT ring zero index location size[%x]\n",
572 __FUNCTION__, sizeof(u32)); 573 __func__, (unsigned int)sizeof(u32));
573 574
574 // Allocate shared XMT ring zero index location 575 /* Allocate shared XMT ring zero index location */
575 adapter->XmtRingZeroIndex = pci_alloc_consistent(adapter->pcidev, 576 adapter->XmtRingZeroIndex = pci_alloc_consistent(adapter->pcidev,
576 sizeof(u32), 577 sizeof(u32),
577 &adapter-> 578 &adapter->
@@ -587,7 +588,7 @@ static int sxg_allocate_resources(p_adapter_t adapter)
587 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlcResS", 588 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlcResS",
588 adapter, SXG_MAX_ENTRIES, 0, 0); 589 adapter, SXG_MAX_ENTRIES, 0, 0);
589 590
590 DBG_ERROR("%s EXIT\n", __FUNCTION__); 591 DBG_ERROR("%s EXIT\n", __func__);
591 return (STATUS_SUCCESS); 592 return (STATUS_SUCCESS);
592} 593}
593 594
@@ -606,17 +607,17 @@ static void sxg_config_pci(struct pci_dev *pcidev)
606 u16 new_command; 607 u16 new_command;
607 608
608 pci_read_config_word(pcidev, PCI_COMMAND, &pci_command); 609 pci_read_config_word(pcidev, PCI_COMMAND, &pci_command);
609 DBG_ERROR("sxg: %s PCI command[%4.4x]\n", __FUNCTION__, pci_command); 610 DBG_ERROR("sxg: %s PCI command[%4.4x]\n", __func__, pci_command);
610 // Set the command register 611 /* Set the command register */
611 new_command = pci_command | (PCI_COMMAND_MEMORY | // Memory Space Enable 612 new_command = pci_command | (PCI_COMMAND_MEMORY | /* Memory Space Enable */
612 PCI_COMMAND_MASTER | // Bus master enable 613 PCI_COMMAND_MASTER | /* Bus master enable */
613 PCI_COMMAND_INVALIDATE | // Memory write and invalidate 614 PCI_COMMAND_INVALIDATE | /* Memory write and invalidate */
614 PCI_COMMAND_PARITY | // Parity error response 615 PCI_COMMAND_PARITY | /* Parity error response */
615 PCI_COMMAND_SERR | // System ERR 616 PCI_COMMAND_SERR | /* System ERR */
616 PCI_COMMAND_FAST_BACK); // Fast back-to-back 617 PCI_COMMAND_FAST_BACK); /* Fast back-to-back */
617 if (pci_command != new_command) { 618 if (pci_command != new_command) {
618 DBG_ERROR("%s -- Updating PCI COMMAND register %4.4x->%4.4x.\n", 619 DBG_ERROR("%s -- Updating PCI COMMAND register %4.4x->%4.4x.\n",
619 __FUNCTION__, pci_command, new_command); 620 __func__, pci_command, new_command);
620 pci_write_config_word(pcidev, PCI_COMMAND, new_command); 621 pci_write_config_word(pcidev, PCI_COMMAND, new_command);
621 } 622 }
622} 623}
@@ -634,9 +635,9 @@ static int sxg_entry_probe(struct pci_dev *pcidev,
634 ulong mmio_len = 0; 635 ulong mmio_len = 0;
635 636
636 DBG_ERROR("sxg: %s 2.6 VERSION ENTER jiffies[%lx] cpu %d\n", 637 DBG_ERROR("sxg: %s 2.6 VERSION ENTER jiffies[%lx] cpu %d\n",
637 __FUNCTION__, jiffies, smp_processor_id()); 638 __func__, jiffies, smp_processor_id());
638 639
639 // Initialize trace buffer 640 /* Initialize trace buffer */
640#ifdef ATKDBG 641#ifdef ATKDBG
641 SxgTraceBuffer = &LSxgTraceBuffer; 642 SxgTraceBuffer = &LSxgTraceBuffer;
642 SXG_TRACE_INIT(SxgTraceBuffer, TRACE_NOISY); 643 SXG_TRACE_INIT(SxgTraceBuffer, TRACE_NOISY);
@@ -701,11 +702,11 @@ static int sxg_entry_probe(struct pci_dev *pcidev,
701 mmio_start, mmio_len); 702 mmio_start, mmio_len);
702 703
703 memmapped_ioaddr = ioremap(mmio_start, mmio_len); 704 memmapped_ioaddr = ioremap(mmio_start, mmio_len);
704 DBG_ERROR("sxg: %s MEMMAPPED_IOADDR [%p]\n", __FUNCTION__, 705 DBG_ERROR("sxg: %s MEMMAPPED_IOADDR [%p]\n", __func__,
705 memmapped_ioaddr); 706 memmapped_ioaddr);
706 if (!memmapped_ioaddr) { 707 if (!memmapped_ioaddr) {
707 DBG_ERROR("%s cannot remap MMIO region %lx @ %lx\n", 708 DBG_ERROR("%s cannot remap MMIO region %lx @ %lx\n",
708 __FUNCTION__, mmio_len, mmio_start); 709 __func__, mmio_len, mmio_start);
709 goto err_out_free_mmio_region; 710 goto err_out_free_mmio_region;
710 } 711 }
711 712
@@ -727,7 +728,7 @@ static int sxg_entry_probe(struct pci_dev *pcidev,
727 memmapped_ioaddr); 728 memmapped_ioaddr);
728 if (!memmapped_ioaddr) { 729 if (!memmapped_ioaddr) {
729 DBG_ERROR("%s cannot remap MMIO region %lx @ %lx\n", 730 DBG_ERROR("%s cannot remap MMIO region %lx @ %lx\n",
730 __FUNCTION__, mmio_len, mmio_start); 731 __func__, mmio_len, mmio_start);
731 goto err_out_free_mmio_region; 732 goto err_out_free_mmio_region;
732 } 733 }
733 734
@@ -738,13 +739,13 @@ static int sxg_entry_probe(struct pci_dev *pcidev,
738 adapter->UcodeRegs = (void *)memmapped_ioaddr; 739 adapter->UcodeRegs = (void *)memmapped_ioaddr;
739 740
740 adapter->State = SXG_STATE_INITIALIZING; 741 adapter->State = SXG_STATE_INITIALIZING;
741 // Maintain a list of all adapters anchored by 742 /* Maintain a list of all adapters anchored by */
742 // the global SxgDriver structure. 743 /* the global SxgDriver structure. */
743 adapter->Next = SxgDriver.Adapters; 744 adapter->Next = SxgDriver.Adapters;
744 SxgDriver.Adapters = adapter; 745 SxgDriver.Adapters = adapter;
745 adapter->AdapterID = ++SxgDriver.AdapterID; 746 adapter->AdapterID = ++SxgDriver.AdapterID;
746 747
747 // Initialize CRC table used to determine multicast hash 748 /* Initialize CRC table used to determine multicast hash */
748 sxg_mcast_init_crc32(); 749 sxg_mcast_init_crc32();
749 750
750 adapter->JumboEnabled = FALSE; 751 adapter->JumboEnabled = FALSE;
@@ -757,18 +758,18 @@ static int sxg_entry_probe(struct pci_dev *pcidev,
757 adapter->ReceiveBufferSize = SXG_RCV_DATA_BUFFER_SIZE; 758 adapter->ReceiveBufferSize = SXG_RCV_DATA_BUFFER_SIZE;
758 } 759 }
759 760
760// status = SXG_READ_EEPROM(adapter); 761/* status = SXG_READ_EEPROM(adapter); */
761// if (!status) { 762/* if (!status) { */
762// goto sxg_init_bad; 763/* goto sxg_init_bad; */
763// } 764/* } */
764 765
765 DBG_ERROR("sxg: %s ENTER sxg_config_pci\n", __FUNCTION__); 766 DBG_ERROR("sxg: %s ENTER sxg_config_pci\n", __func__);
766 sxg_config_pci(pcidev); 767 sxg_config_pci(pcidev);
767 DBG_ERROR("sxg: %s EXIT sxg_config_pci\n", __FUNCTION__); 768 DBG_ERROR("sxg: %s EXIT sxg_config_pci\n", __func__);
768 769
769 DBG_ERROR("sxg: %s ENTER sxg_init_driver\n", __FUNCTION__); 770 DBG_ERROR("sxg: %s ENTER sxg_init_driver\n", __func__);
770 sxg_init_driver(); 771 sxg_init_driver();
771 DBG_ERROR("sxg: %s EXIT sxg_init_driver\n", __FUNCTION__); 772 DBG_ERROR("sxg: %s EXIT sxg_init_driver\n", __func__);
772 773
773 adapter->vendid = pci_tbl_entry->vendor; 774 adapter->vendid = pci_tbl_entry->vendor;
774 adapter->devid = pci_tbl_entry->device; 775 adapter->devid = pci_tbl_entry->device;
@@ -780,23 +781,23 @@ static int sxg_entry_probe(struct pci_dev *pcidev,
780 adapter->irq = pcidev->irq; 781 adapter->irq = pcidev->irq;
781 adapter->next_netdevice = head_netdevice; 782 adapter->next_netdevice = head_netdevice;
782 head_netdevice = netdev; 783 head_netdevice = netdev;
783// adapter->chipid = chip_idx; 784/* adapter->chipid = chip_idx; */
784 adapter->port = 0; //adapter->functionnumber; 785 adapter->port = 0; /*adapter->functionnumber; */
785 adapter->cardindex = adapter->port; 786 adapter->cardindex = adapter->port;
786 787
787 // Allocate memory and other resources 788 /* Allocate memory and other resources */
788 DBG_ERROR("sxg: %s ENTER sxg_allocate_resources\n", __FUNCTION__); 789 DBG_ERROR("sxg: %s ENTER sxg_allocate_resources\n", __func__);
789 status = sxg_allocate_resources(adapter); 790 status = sxg_allocate_resources(adapter);
790 DBG_ERROR("sxg: %s EXIT sxg_allocate_resources status %x\n", 791 DBG_ERROR("sxg: %s EXIT sxg_allocate_resources status %x\n",
791 __FUNCTION__, status); 792 __func__, status);
792 if (status != STATUS_SUCCESS) { 793 if (status != STATUS_SUCCESS) {
793 goto err_out_unmap; 794 goto err_out_unmap;
794 } 795 }
795 796
796 DBG_ERROR("sxg: %s ENTER sxg_download_microcode\n", __FUNCTION__); 797 DBG_ERROR("sxg: %s ENTER sxg_download_microcode\n", __func__);
797 if (sxg_download_microcode(adapter, SXG_UCODE_SAHARA)) { 798 if (sxg_download_microcode(adapter, SXG_UCODE_SAHARA)) {
798 DBG_ERROR("sxg: %s ENTER sxg_adapter_set_hwaddr\n", 799 DBG_ERROR("sxg: %s ENTER sxg_adapter_set_hwaddr\n",
799 __FUNCTION__); 800 __func__);
800 sxg_adapter_set_hwaddr(adapter); 801 sxg_adapter_set_hwaddr(adapter);
801 } else { 802 } else {
802 adapter->state = ADAPT_FAIL; 803 adapter->state = ADAPT_FAIL;
@@ -819,7 +820,7 @@ static int sxg_entry_probe(struct pci_dev *pcidev,
819#endif 820#endif
820 821
821 strcpy(netdev->name, "eth%d"); 822 strcpy(netdev->name, "eth%d");
822// strcpy(netdev->name, pci_name(pcidev)); 823/* strcpy(netdev->name, pci_name(pcidev)); */
823 if ((err = register_netdev(netdev))) { 824 if ((err = register_netdev(netdev))) {
824 DBG_ERROR("Cannot register net device, aborting. %s\n", 825 DBG_ERROR("Cannot register net device, aborting. %s\n",
825 netdev->name); 826 netdev->name);
@@ -832,11 +833,11 @@ static int sxg_entry_probe(struct pci_dev *pcidev,
832 netdev->dev_addr[1], netdev->dev_addr[2], netdev->dev_addr[3], 833 netdev->dev_addr[1], netdev->dev_addr[2], netdev->dev_addr[3],
833 netdev->dev_addr[4], netdev->dev_addr[5]); 834 netdev->dev_addr[4], netdev->dev_addr[5]);
834 835
835//sxg_init_bad: 836/*sxg_init_bad: */
836 ASSERT(status == FALSE); 837 ASSERT(status == FALSE);
837// sxg_free_adapter(adapter); 838/* sxg_free_adapter(adapter); */
838 839
839 DBG_ERROR("sxg: %s EXIT status[%x] jiffies[%lx] cpu %d\n", __FUNCTION__, 840 DBG_ERROR("sxg: %s EXIT status[%x] jiffies[%lx] cpu %d\n", __func__,
840 status, jiffies, smp_processor_id()); 841 status, jiffies, smp_processor_id());
841 return status; 842 return status;
842 843
@@ -848,7 +849,7 @@ static int sxg_entry_probe(struct pci_dev *pcidev,
848 849
849 err_out_exit_sxg_probe: 850 err_out_exit_sxg_probe:
850 851
851 DBG_ERROR("%s EXIT jiffies[%lx] cpu %d\n", __FUNCTION__, jiffies, 852 DBG_ERROR("%s EXIT jiffies[%lx] cpu %d\n", __func__, jiffies,
852 smp_processor_id()); 853 smp_processor_id());
853 854
854 return -ENODEV; 855 return -ENODEV;
@@ -874,12 +875,12 @@ static void sxg_disable_interrupt(p_adapter_t adapter)
874{ 875{
875 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DisIntr", 876 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DisIntr",
876 adapter, adapter->InterruptsEnabled, 0, 0); 877 adapter, adapter->InterruptsEnabled, 0, 0);
877 // For now, RSS is disabled with line based interrupts 878 /* For now, RSS is disabled with line based interrupts */
878 ASSERT(adapter->RssEnabled == FALSE); 879 ASSERT(adapter->RssEnabled == FALSE);
879 ASSERT(adapter->MsiEnabled == FALSE); 880 ASSERT(adapter->MsiEnabled == FALSE);
880 // 881 /* */
881 // Turn off interrupts by writing to the icr register. 882 /* Turn off interrupts by writing to the icr register. */
882 // 883 /* */
883 WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_DISABLE), TRUE); 884 WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_DISABLE), TRUE);
884 885
885 adapter->InterruptsEnabled = 0; 886 adapter->InterruptsEnabled = 0;
@@ -905,12 +906,12 @@ static void sxg_enable_interrupt(p_adapter_t adapter)
905{ 906{
906 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "EnIntr", 907 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "EnIntr",
907 adapter, adapter->InterruptsEnabled, 0, 0); 908 adapter, adapter->InterruptsEnabled, 0, 0);
908 // For now, RSS is disabled with line based interrupts 909 /* For now, RSS is disabled with line based interrupts */
909 ASSERT(adapter->RssEnabled == FALSE); 910 ASSERT(adapter->RssEnabled == FALSE);
910 ASSERT(adapter->MsiEnabled == FALSE); 911 ASSERT(adapter->MsiEnabled == FALSE);
911 // 912 /* */
912 // Turn on interrupts by writing to the icr register. 913 /* Turn on interrupts by writing to the icr register. */
913 // 914 /* */
914 WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_ENABLE), TRUE); 915 WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_ENABLE), TRUE);
915 916
916 adapter->InterruptsEnabled = 1; 917 adapter->InterruptsEnabled = 1;
@@ -935,29 +936,29 @@ static irqreturn_t sxg_isr(int irq, void *dev_id)
935{ 936{
936 p_net_device dev = (p_net_device) dev_id; 937 p_net_device dev = (p_net_device) dev_id;
937 p_adapter_t adapter = (p_adapter_t) netdev_priv(dev); 938 p_adapter_t adapter = (p_adapter_t) netdev_priv(dev);
938// u32 CpuMask = 0, i; 939/* u32 CpuMask = 0, i; */
939 940
940 adapter->Stats.NumInts++; 941 adapter->Stats.NumInts++;
941 if (adapter->Isr[0] == 0) { 942 if (adapter->Isr[0] == 0) {
942 // The SLIC driver used to experience a number of spurious interrupts 943 /* The SLIC driver used to experience a number of spurious interrupts */
943 // due to the delay associated with the masking of the interrupt 944 /* due to the delay associated with the masking of the interrupt */
944 // (we'd bounce back in here). If we see that again with Sahara, 945 /* (we'd bounce back in here). If we see that again with Sahara, */
945 // add a READ_REG of the Icr register after the WRITE_REG below. 946 /* add a READ_REG of the Icr register after the WRITE_REG below. */
946 adapter->Stats.FalseInts++; 947 adapter->Stats.FalseInts++;
947 return IRQ_NONE; 948 return IRQ_NONE;
948 } 949 }
949 // 950 /* */
950 // Move the Isr contents and clear the value in 951 /* Move the Isr contents and clear the value in */
951 // shared memory, and mask interrupts 952 /* shared memory, and mask interrupts */
952 // 953 /* */
953 adapter->IsrCopy[0] = adapter->Isr[0]; 954 adapter->IsrCopy[0] = adapter->Isr[0];
954 adapter->Isr[0] = 0; 955 adapter->Isr[0] = 0;
955 WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_MASK), TRUE); 956 WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_MASK), TRUE);
956// ASSERT(adapter->IsrDpcsPending == 0); 957/* ASSERT(adapter->IsrDpcsPending == 0); */
957#if XXXTODO // RSS Stuff 958#if XXXTODO /* RSS Stuff */
958 // If RSS is enabled and the ISR specifies 959 /* If RSS is enabled and the ISR specifies */
959 // SXG_ISR_EVENT, then schedule DPC's 960 /* SXG_ISR_EVENT, then schedule DPC's */
960 // based on event queues. 961 /* based on event queues. */
961 if (adapter->RssEnabled && (adapter->IsrCopy[0] & SXG_ISR_EVENT)) { 962 if (adapter->RssEnabled && (adapter->IsrCopy[0] & SXG_ISR_EVENT)) {
962 for (i = 0; 963 for (i = 0;
963 i < adapter->RssSystemInfo->ProcessorInfo.RssCpuCount; 964 i < adapter->RssSystemInfo->ProcessorInfo.RssCpuCount;
@@ -973,8 +974,8 @@ static irqreturn_t sxg_isr(int irq, void *dev_id)
973 } 974 }
974 } 975 }
975 } 976 }
976 // Now, either schedule the CPUs specified by the CpuMask, 977 /* Now, either schedule the CPUs specified by the CpuMask, */
977 // or queue default 978 /* or queue default */
978 if (CpuMask) { 979 if (CpuMask) {
979 *QueueDefault = FALSE; 980 *QueueDefault = FALSE;
980 } else { 981 } else {
@@ -983,9 +984,9 @@ static irqreturn_t sxg_isr(int irq, void *dev_id)
983 } 984 }
984 *TargetCpus = CpuMask; 985 *TargetCpus = CpuMask;
985#endif 986#endif
986 // 987 /* */
987 // There are no DPCs in Linux, so call the handler now 988 /* There are no DPCs in Linux, so call the handler now */
988 // 989 /* */
989 sxg_handle_interrupt(adapter); 990 sxg_handle_interrupt(adapter);
990 991
991 return IRQ_HANDLED; 992 return IRQ_HANDLED;
@@ -993,7 +994,7 @@ static irqreturn_t sxg_isr(int irq, void *dev_id)
993 994
994static void sxg_handle_interrupt(p_adapter_t adapter) 995static void sxg_handle_interrupt(p_adapter_t adapter)
995{ 996{
996// unsigned char RssId = 0; 997/* unsigned char RssId = 0; */
997 u32 NewIsr; 998 u32 NewIsr;
998 999
999 if (adapter->Stats.RcvNoBuffer < 5) { 1000 if (adapter->Stats.RcvNoBuffer < 5) {
@@ -1002,32 +1003,32 @@ static void sxg_handle_interrupt(p_adapter_t adapter)
1002 } 1003 }
1003 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "HndlIntr", 1004 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "HndlIntr",
1004 adapter, adapter->IsrCopy[0], 0, 0); 1005 adapter, adapter->IsrCopy[0], 0, 0);
1005 // For now, RSS is disabled with line based interrupts 1006 /* For now, RSS is disabled with line based interrupts */
1006 ASSERT(adapter->RssEnabled == FALSE); 1007 ASSERT(adapter->RssEnabled == FALSE);
1007 ASSERT(adapter->MsiEnabled == FALSE); 1008 ASSERT(adapter->MsiEnabled == FALSE);
1008 ASSERT(adapter->IsrCopy[0]); 1009 ASSERT(adapter->IsrCopy[0]);
1009///////////////////////////// 1010/*/////////////////////////// */
1010 1011
1011 // Always process the event queue. 1012 /* Always process the event queue. */
1012 sxg_process_event_queue(adapter, 1013 sxg_process_event_queue(adapter,
1013 (adapter->RssEnabled ? /*RssId */ 0 : 0)); 1014 (adapter->RssEnabled ? /*RssId */ 0 : 0));
1014 1015
1015#if XXXTODO // RSS stuff 1016#if XXXTODO /* RSS stuff */
1016 if (--adapter->IsrDpcsPending) { 1017 if (--adapter->IsrDpcsPending) {
1017 // We're done. 1018 /* We're done. */
1018 ASSERT(adapter->RssEnabled); 1019 ASSERT(adapter->RssEnabled);
1019 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DPCsPend", 1020 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DPCsPend",
1020 adapter, 0, 0, 0); 1021 adapter, 0, 0, 0);
1021 return; 1022 return;
1022 } 1023 }
1023#endif 1024#endif
1024 // 1025 /* */
1025 // Last (or only) DPC processes the ISR and clears the interrupt. 1026 /* Last (or only) DPC processes the ISR and clears the interrupt. */
1026 // 1027 /* */
1027 NewIsr = sxg_process_isr(adapter, 0); 1028 NewIsr = sxg_process_isr(adapter, 0);
1028 // 1029 /* */
1029 // Reenable interrupts 1030 /* Reenable interrupts */
1030 // 1031 /* */
1031 adapter->IsrCopy[0] = 0; 1032 adapter->IsrCopy[0] = 0;
1032 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "ClearIsr", 1033 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "ClearIsr",
1033 adapter, NewIsr, 0, 0); 1034 adapter, NewIsr, 0, 0);
@@ -1063,75 +1064,75 @@ static int sxg_process_isr(p_adapter_t adapter, u32 MessageId)
1063 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "ProcIsr", 1064 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "ProcIsr",
1064 adapter, Isr, 0, 0); 1065 adapter, Isr, 0, 0);
1065 1066
1066 // Error 1067 /* Error */
1067 if (Isr & SXG_ISR_ERR) { 1068 if (Isr & SXG_ISR_ERR) {
1068 if (Isr & SXG_ISR_PDQF) { 1069 if (Isr & SXG_ISR_PDQF) {
1069 adapter->Stats.PdqFull++; 1070 adapter->Stats.PdqFull++;
1070 DBG_ERROR("%s: SXG_ISR_ERR PDQF!!\n", __FUNCTION__); 1071 DBG_ERROR("%s: SXG_ISR_ERR PDQF!!\n", __func__);
1071 } 1072 }
1072 // No host buffer 1073 /* No host buffer */
1073 if (Isr & SXG_ISR_RMISS) { 1074 if (Isr & SXG_ISR_RMISS) {
1074 // There is a bunch of code in the SLIC driver which 1075 /* There is a bunch of code in the SLIC driver which */
1075 // attempts to process more receive events per DPC 1076 /* attempts to process more receive events per DPC */
1076 // if we start to fall behind. We'll probably 1077 /* if we start to fall behind. We'll probably */
1077 // need to do something similar here, but hold 1078 /* need to do something similar here, but hold */
1078 // off for now. I don't want to make the code more 1079 /* off for now. I don't want to make the code more */
1079 // complicated than strictly needed. 1080 /* complicated than strictly needed. */
1080 adapter->Stats.RcvNoBuffer++; 1081 adapter->Stats.RcvNoBuffer++;
1081 if (adapter->Stats.RcvNoBuffer < 5) { 1082 if (adapter->Stats.RcvNoBuffer < 5) {
1082 DBG_ERROR("%s: SXG_ISR_ERR RMISS!!\n", 1083 DBG_ERROR("%s: SXG_ISR_ERR RMISS!!\n",
1083 __FUNCTION__); 1084 __func__);
1084 } 1085 }
1085 } 1086 }
1086 // Card crash 1087 /* Card crash */
1087 if (Isr & SXG_ISR_DEAD) { 1088 if (Isr & SXG_ISR_DEAD) {
1088 // Set aside the crash info and set the adapter state to RESET 1089 /* Set aside the crash info and set the adapter state to RESET */
1089 adapter->CrashCpu = 1090 adapter->CrashCpu =
1090 (unsigned char)((Isr & SXG_ISR_CPU) >> 1091 (unsigned char)((Isr & SXG_ISR_CPU) >>
1091 SXG_ISR_CPU_SHIFT); 1092 SXG_ISR_CPU_SHIFT);
1092 adapter->CrashLocation = (ushort) (Isr & SXG_ISR_CRASH); 1093 adapter->CrashLocation = (ushort) (Isr & SXG_ISR_CRASH);
1093 adapter->Dead = TRUE; 1094 adapter->Dead = TRUE;
1094 DBG_ERROR("%s: ISR_DEAD %x, CPU: %d\n", __FUNCTION__, 1095 DBG_ERROR("%s: ISR_DEAD %x, CPU: %d\n", __func__,
1095 adapter->CrashLocation, adapter->CrashCpu); 1096 adapter->CrashLocation, adapter->CrashCpu);
1096 } 1097 }
1097 // Event ring full 1098 /* Event ring full */
1098 if (Isr & SXG_ISR_ERFULL) { 1099 if (Isr & SXG_ISR_ERFULL) {
1099 // Same issue as RMISS, really. This means the 1100 /* Same issue as RMISS, really. This means the */
1100 // host is falling behind the card. Need to increase 1101 /* host is falling behind the card. Need to increase */
1101 // event ring size, process more events per interrupt, 1102 /* event ring size, process more events per interrupt, */
1102 // and/or reduce/remove interrupt aggregation. 1103 /* and/or reduce/remove interrupt aggregation. */
1103 adapter->Stats.EventRingFull++; 1104 adapter->Stats.EventRingFull++;
1104 DBG_ERROR("%s: SXG_ISR_ERR EVENT RING FULL!!\n", 1105 DBG_ERROR("%s: SXG_ISR_ERR EVENT RING FULL!!\n",
1105 __FUNCTION__); 1106 __func__);
1106 } 1107 }
1107 // Transmit drop - no DRAM buffers or XMT error 1108 /* Transmit drop - no DRAM buffers or XMT error */
1108 if (Isr & SXG_ISR_XDROP) { 1109 if (Isr & SXG_ISR_XDROP) {
1109 adapter->Stats.XmtDrops++; 1110 adapter->Stats.XmtDrops++;
1110 adapter->Stats.XmtErrors++; 1111 adapter->Stats.XmtErrors++;
1111 DBG_ERROR("%s: SXG_ISR_ERR XDROP!!\n", __FUNCTION__); 1112 DBG_ERROR("%s: SXG_ISR_ERR XDROP!!\n", __func__);
1112 } 1113 }
1113 } 1114 }
1114 // Slowpath send completions 1115 /* Slowpath send completions */
1115 if (Isr & SXG_ISR_SPSEND) { 1116 if (Isr & SXG_ISR_SPSEND) {
1116 sxg_complete_slow_send(adapter); 1117 sxg_complete_slow_send(adapter);
1117 } 1118 }
1118 // Dump 1119 /* Dump */
1119 if (Isr & SXG_ISR_UPC) { 1120 if (Isr & SXG_ISR_UPC) {
1120 ASSERT(adapter->DumpCmdRunning); // Maybe change when debug is added.. 1121 ASSERT(adapter->DumpCmdRunning); /* Maybe change when debug is added.. */
1121 adapter->DumpCmdRunning = FALSE; 1122 adapter->DumpCmdRunning = FALSE;
1122 } 1123 }
1123 // Link event 1124 /* Link event */
1124 if (Isr & SXG_ISR_LINK) { 1125 if (Isr & SXG_ISR_LINK) {
1125 sxg_link_event(adapter); 1126 sxg_link_event(adapter);
1126 } 1127 }
1127 // Debug - breakpoint hit 1128 /* Debug - breakpoint hit */
1128 if (Isr & SXG_ISR_BREAK) { 1129 if (Isr & SXG_ISR_BREAK) {
1129 // At the moment AGDB isn't written to support interactive 1130 /* At the moment AGDB isn't written to support interactive */
1130 // debug sessions. When it is, this interrupt will be used 1131 /* debug sessions. When it is, this interrupt will be used */
1131 // to signal AGDB that it has hit a breakpoint. For now, ASSERT. 1132 /* to signal AGDB that it has hit a breakpoint. For now, ASSERT. */
1132 ASSERT(0); 1133 ASSERT(0);
1133 } 1134 }
1134 // Heartbeat response 1135 /* Heartbeat response */
1135 if (Isr & SXG_ISR_PING) { 1136 if (Isr & SXG_ISR_PING) {
1136 adapter->PingOutstanding = FALSE; 1137 adapter->PingOutstanding = FALSE;
1137 } 1138 }
@@ -1171,39 +1172,39 @@ static u32 sxg_process_event_queue(p_adapter_t adapter, u32 RssId)
1171 (adapter->State == SXG_STATE_PAUSING) || 1172 (adapter->State == SXG_STATE_PAUSING) ||
1172 (adapter->State == SXG_STATE_PAUSED) || 1173 (adapter->State == SXG_STATE_PAUSED) ||
1173 (adapter->State == SXG_STATE_HALTING)); 1174 (adapter->State == SXG_STATE_HALTING));
1174 // We may still have unprocessed events on the queue if 1175 /* We may still have unprocessed events on the queue if */
1175 // the card crashed. Don't process them. 1176 /* the card crashed. Don't process them. */
1176 if (adapter->Dead) { 1177 if (adapter->Dead) {
1177 return (0); 1178 return (0);
1178 } 1179 }
1179 // In theory there should only be a single processor that 1180 /* In theory there should only be a single processor that */
1180 // accesses this queue, and only at interrupt-DPC time. So 1181 /* accesses this queue, and only at interrupt-DPC time. So */
1181 // we shouldn't need a lock for any of this. 1182 /* we shouldn't need a lock for any of this. */
1182 while (Event->Status & EVENT_STATUS_VALID) { 1183 while (Event->Status & EVENT_STATUS_VALID) {
1183 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "Event", 1184 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "Event",
1184 Event, Event->Code, Event->Status, 1185 Event, Event->Code, Event->Status,
1185 adapter->NextEvent); 1186 adapter->NextEvent);
1186 switch (Event->Code) { 1187 switch (Event->Code) {
1187 case EVENT_CODE_BUFFERS: 1188 case EVENT_CODE_BUFFERS:
1188 ASSERT(!(Event->CommandIndex & 0xFF00)); // SXG_RING_INFO Head & Tail == unsigned char 1189 ASSERT(!(Event->CommandIndex & 0xFF00)); /* SXG_RING_INFO Head & Tail == unsigned char */
1189 // 1190 /* */
1190 sxg_complete_descriptor_blocks(adapter, 1191 sxg_complete_descriptor_blocks(adapter,
1191 Event->CommandIndex); 1192 Event->CommandIndex);
1192 // 1193 /* */
1193 break; 1194 break;
1194 case EVENT_CODE_SLOWRCV: 1195 case EVENT_CODE_SLOWRCV:
1195 --adapter->RcvBuffersOnCard; 1196 --adapter->RcvBuffersOnCard;
1196 if ((skb = sxg_slow_receive(adapter, Event))) { 1197 if ((skb = sxg_slow_receive(adapter, Event))) {
1197 u32 rx_bytes; 1198 u32 rx_bytes;
1198#ifdef LINUX_HANDLES_RCV_INDICATION_LISTS 1199#ifdef LINUX_HANDLES_RCV_INDICATION_LISTS
1199 // Add it to our indication list 1200 /* Add it to our indication list */
1200 SXG_ADD_RCV_PACKET(adapter, skb, prev_skb, 1201 SXG_ADD_RCV_PACKET(adapter, skb, prev_skb,
1201 IndicationList, num_skbs); 1202 IndicationList, num_skbs);
1202 // In Linux, we just pass up each skb to the protocol above at this point, 1203 /* In Linux, we just pass up each skb to the protocol above at this point, */
1203 // there is no capability of an indication list. 1204 /* there is no capability of an indication list. */
1204#else 1205#else
1205// CHECK skb_pull(skb, INIC_RCVBUF_HEADSIZE); 1206/* CHECK skb_pull(skb, INIC_RCVBUF_HEADSIZE); */
1206 rx_bytes = Event->Length; // (rcvbuf->length & IRHDDR_FLEN_MSK); 1207 rx_bytes = Event->Length; /* (rcvbuf->length & IRHDDR_FLEN_MSK); */
1207 skb_put(skb, rx_bytes); 1208 skb_put(skb, rx_bytes);
1208 adapter->stats.rx_packets++; 1209 adapter->stats.rx_packets++;
1209 adapter->stats.rx_bytes += rx_bytes; 1210 adapter->stats.rx_bytes += rx_bytes;
@@ -1218,43 +1219,43 @@ static u32 sxg_process_event_queue(p_adapter_t adapter, u32 RssId)
1218 break; 1219 break;
1219 default: 1220 default:
1220 DBG_ERROR("%s: ERROR Invalid EventCode %d\n", 1221 DBG_ERROR("%s: ERROR Invalid EventCode %d\n",
1221 __FUNCTION__, Event->Code); 1222 __func__, Event->Code);
1222// ASSERT(0); 1223/* ASSERT(0); */
1223 } 1224 }
1224 // See if we need to restock card receive buffers. 1225 /* See if we need to restock card receive buffers. */
1225 // There are two things to note here: 1226 /* There are two things to note here: */
1226 // First - This test is not SMP safe. The 1227 /* First - This test is not SMP safe. The */
1227 // adapter->BuffersOnCard field is protected via atomic interlocked calls, but 1228 /* adapter->BuffersOnCard field is protected via atomic interlocked calls, but */
1228 // we do not protect it with respect to these tests. The only way to do that 1229 /* we do not protect it with respect to these tests. The only way to do that */
1229 // is with a lock, and I don't want to grab a lock every time we adjust the 1230 /* is with a lock, and I don't want to grab a lock every time we adjust the */
1230 // BuffersOnCard count. Instead, we allow the buffer replenishment to be off 1231 /* BuffersOnCard count. Instead, we allow the buffer replenishment to be off */
1231 // once in a while. The worst that can happen is the card is given one 1232 /* once in a while. The worst that can happen is the card is given one */
1232 // more-or-less descriptor block than the arbitrary value we've chosen. 1233 /* more-or-less descriptor block than the arbitrary value we've chosen. */
1233 // No big deal 1234 /* No big deal */
1234 // In short DO NOT ADD A LOCK HERE, OR WHERE RcvBuffersOnCard is adjusted. 1235 /* In short DO NOT ADD A LOCK HERE, OR WHERE RcvBuffersOnCard is adjusted. */
1235 // Second - We expect this test to rarely evaluate to true. We attempt to 1236 /* Second - We expect this test to rarely evaluate to true. We attempt to */
1236 // refill descriptor blocks as they are returned to us 1237 /* refill descriptor blocks as they are returned to us */
1237 // (sxg_complete_descriptor_blocks), so The only time this should evaluate 1238 /* (sxg_complete_descriptor_blocks), so The only time this should evaluate */
1238 // to true is when sxg_complete_descriptor_blocks failed to allocate 1239 /* to true is when sxg_complete_descriptor_blocks failed to allocate */
1239 // receive buffers. 1240 /* receive buffers. */
1240 if (adapter->RcvBuffersOnCard < SXG_RCV_DATA_BUFFERS) { 1241 if (adapter->RcvBuffersOnCard < SXG_RCV_DATA_BUFFERS) {
1241 sxg_stock_rcv_buffers(adapter); 1242 sxg_stock_rcv_buffers(adapter);
1242 } 1243 }
1243 // It's more efficient to just set this to zero. 1244 /* It's more efficient to just set this to zero. */
1244 // But clearing the top bit saves potential debug info... 1245 /* But clearing the top bit saves potential debug info... */
1245 Event->Status &= ~EVENT_STATUS_VALID; 1246 Event->Status &= ~EVENT_STATUS_VALID;
1246 // Advanct to the next event 1247 /* Advanct to the next event */
1247 SXG_ADVANCE_INDEX(adapter->NextEvent[RssId], EVENT_RING_SIZE); 1248 SXG_ADVANCE_INDEX(adapter->NextEvent[RssId], EVENT_RING_SIZE);
1248 Event = &EventRing->Ring[adapter->NextEvent[RssId]]; 1249 Event = &EventRing->Ring[adapter->NextEvent[RssId]];
1249 EventsProcessed++; 1250 EventsProcessed++;
1250 if (EventsProcessed == EVENT_RING_BATCH) { 1251 if (EventsProcessed == EVENT_RING_BATCH) {
1251 // Release a batch of events back to the card 1252 /* Release a batch of events back to the card */
1252 WRITE_REG(adapter->UcodeRegs[RssId].EventRelease, 1253 WRITE_REG(adapter->UcodeRegs[RssId].EventRelease,
1253 EVENT_RING_BATCH, FALSE); 1254 EVENT_RING_BATCH, FALSE);
1254 EventsProcessed = 0; 1255 EventsProcessed = 0;
1255 // If we've processed our batch limit, break out of the 1256 /* If we've processed our batch limit, break out of the */
1256 // loop and return SXG_ISR_EVENT to arrange for us to 1257 /* loop and return SXG_ISR_EVENT to arrange for us to */
1257 // be called again 1258 /* be called again */
1258 if (Batches++ == EVENT_BATCH_LIMIT) { 1259 if (Batches++ == EVENT_BATCH_LIMIT) {
1259 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, 1260 SXG_TRACE(TRACE_SXG, SxgTraceBuffer,
1260 TRACE_NOISY, "EvtLimit", Batches, 1261 TRACE_NOISY, "EvtLimit", Batches,
@@ -1265,14 +1266,14 @@ static u32 sxg_process_event_queue(p_adapter_t adapter, u32 RssId)
1265 } 1266 }
1266 } 1267 }
1267#ifdef LINUX_HANDLES_RCV_INDICATION_LISTS 1268#ifdef LINUX_HANDLES_RCV_INDICATION_LISTS
1268 // 1269 /* */
1269 // Indicate any received dumb-nic frames 1270 /* Indicate any received dumb-nic frames */
1270 // 1271 /* */
1271 SXG_INDICATE_PACKETS(adapter, IndicationList, num_skbs); 1272 SXG_INDICATE_PACKETS(adapter, IndicationList, num_skbs);
1272#endif 1273#endif
1273 // 1274 /* */
1274 // Release events back to the card. 1275 /* Release events back to the card. */
1275 // 1276 /* */
1276 if (EventsProcessed) { 1277 if (EventsProcessed) {
1277 WRITE_REG(adapter->UcodeRegs[RssId].EventRelease, 1278 WRITE_REG(adapter->UcodeRegs[RssId].EventRelease,
1278 EventsProcessed, FALSE); 1279 EventsProcessed, FALSE);
@@ -1299,43 +1300,43 @@ static void sxg_complete_slow_send(p_adapter_t adapter)
1299 u32 *ContextType; 1300 u32 *ContextType;
1300 PSXG_CMD XmtCmd; 1301 PSXG_CMD XmtCmd;
1301 1302
1302 // NOTE - This lock is dropped and regrabbed in this loop. 1303 /* NOTE - This lock is dropped and regrabbed in this loop. */
1303 // This means two different processors can both be running 1304 /* This means two different processors can both be running */
1304 // through this loop. Be *very* careful. 1305 /* through this loop. Be *very* careful. */
1305 spin_lock(&adapter->XmtZeroLock); 1306 spin_lock(&adapter->XmtZeroLock);
1306 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnds", 1307 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnds",
1307 adapter, XmtRingInfo->Head, XmtRingInfo->Tail, 0); 1308 adapter, XmtRingInfo->Head, XmtRingInfo->Tail, 0);
1308 1309
1309 while (XmtRingInfo->Tail != *adapter->XmtRingZeroIndex) { 1310 while (XmtRingInfo->Tail != *adapter->XmtRingZeroIndex) {
1310 // Locate the current Cmd (ring descriptor entry), and 1311 /* Locate the current Cmd (ring descriptor entry), and */
1311 // associated SGL, and advance the tail 1312 /* associated SGL, and advance the tail */
1312 SXG_RETURN_CMD(XmtRing, XmtRingInfo, XmtCmd, ContextType); 1313 SXG_RETURN_CMD(XmtRing, XmtRingInfo, XmtCmd, ContextType);
1313 ASSERT(ContextType); 1314 ASSERT(ContextType);
1314 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnd", 1315 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnd",
1315 XmtRingInfo->Head, XmtRingInfo->Tail, XmtCmd, 0); 1316 XmtRingInfo->Head, XmtRingInfo->Tail, XmtCmd, 0);
1316 // Clear the SGL field. 1317 /* Clear the SGL field. */
1317 XmtCmd->Sgl = 0; 1318 XmtCmd->Sgl = 0;
1318 1319
1319 switch (*ContextType) { 1320 switch (*ContextType) {
1320 case SXG_SGL_DUMB: 1321 case SXG_SGL_DUMB:
1321 { 1322 {
1322 struct sk_buff *skb; 1323 struct sk_buff *skb;
1323 // Dumb-nic send. Command context is the dumb-nic SGL 1324 /* Dumb-nic send. Command context is the dumb-nic SGL */
1324 skb = (struct sk_buff *)ContextType; 1325 skb = (struct sk_buff *)ContextType;
1325 // Complete the send 1326 /* Complete the send */
1326 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, 1327 SXG_TRACE(TRACE_SXG, SxgTraceBuffer,
1327 TRACE_IMPORTANT, "DmSndCmp", skb, 0, 1328 TRACE_IMPORTANT, "DmSndCmp", skb, 0,
1328 0, 0); 1329 0, 0);
1329 ASSERT(adapter->Stats.XmtQLen); 1330 ASSERT(adapter->Stats.XmtQLen);
1330 adapter->Stats.XmtQLen--; // within XmtZeroLock 1331 adapter->Stats.XmtQLen--; /* within XmtZeroLock */
1331 adapter->Stats.XmtOk++; 1332 adapter->Stats.XmtOk++;
1332 // Now drop the lock and complete the send back to 1333 /* Now drop the lock and complete the send back to */
1333 // Microsoft. We need to drop the lock because 1334 /* Microsoft. We need to drop the lock because */
1334 // Microsoft can come back with a chimney send, which 1335 /* Microsoft can come back with a chimney send, which */
1335 // results in a double trip in SxgTcpOuput 1336 /* results in a double trip in SxgTcpOuput */
1336 spin_unlock(&adapter->XmtZeroLock); 1337 spin_unlock(&adapter->XmtZeroLock);
1337 SXG_COMPLETE_DUMB_SEND(adapter, skb); 1338 SXG_COMPLETE_DUMB_SEND(adapter, skb);
1338 // and reacquire.. 1339 /* and reacquire.. */
1339 spin_lock(&adapter->XmtZeroLock); 1340 spin_lock(&adapter->XmtZeroLock);
1340 } 1341 }
1341 break; 1342 break;
@@ -1371,7 +1372,7 @@ static struct sk_buff *sxg_slow_receive(p_adapter_t adapter, PSXG_EVENT Event)
1371 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "SlowRcv", Event, 1372 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "SlowRcv", Event,
1372 RcvDataBufferHdr, RcvDataBufferHdr->State, 1373 RcvDataBufferHdr, RcvDataBufferHdr->State,
1373 RcvDataBufferHdr->VirtualAddress); 1374 RcvDataBufferHdr->VirtualAddress);
1374 // Drop rcv frames in non-running state 1375 /* Drop rcv frames in non-running state */
1375 switch (adapter->State) { 1376 switch (adapter->State) {
1376 case SXG_STATE_RUNNING: 1377 case SXG_STATE_RUNNING:
1377 break; 1378 break;
@@ -1384,12 +1385,12 @@ static struct sk_buff *sxg_slow_receive(p_adapter_t adapter, PSXG_EVENT Event)
1384 goto drop; 1385 goto drop;
1385 } 1386 }
1386 1387
1387 // Change buffer state to UPSTREAM 1388 /* Change buffer state to UPSTREAM */
1388 RcvDataBufferHdr->State = SXG_BUFFER_UPSTREAM; 1389 RcvDataBufferHdr->State = SXG_BUFFER_UPSTREAM;
1389 if (Event->Status & EVENT_STATUS_RCVERR) { 1390 if (Event->Status & EVENT_STATUS_RCVERR) {
1390 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "RcvError", 1391 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "RcvError",
1391 Event, Event->Status, Event->HostHandle, 0); 1392 Event, Event->Status, Event->HostHandle, 0);
1392 // XXXTODO - Remove this print later 1393 /* XXXTODO - Remove this print later */
1393 DBG_ERROR("SXG: Receive error %x\n", *(u32 *) 1394 DBG_ERROR("SXG: Receive error %x\n", *(u32 *)
1394 SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr)); 1395 SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr));
1395 sxg_process_rcv_error(adapter, *(u32 *) 1396 sxg_process_rcv_error(adapter, *(u32 *)
@@ -1397,8 +1398,8 @@ static struct sk_buff *sxg_slow_receive(p_adapter_t adapter, PSXG_EVENT Event)
1397 (RcvDataBufferHdr)); 1398 (RcvDataBufferHdr));
1398 goto drop; 1399 goto drop;
1399 } 1400 }
1400#if XXXTODO // VLAN stuff 1401#if XXXTODO /* VLAN stuff */
1401 // If there's a VLAN tag, extract it and validate it 1402 /* If there's a VLAN tag, extract it and validate it */
1402 if (((p_ether_header) (SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr)))-> 1403 if (((p_ether_header) (SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr)))->
1403 EtherType == ETHERTYPE_VLAN) { 1404 EtherType == ETHERTYPE_VLAN) {
1404 if (SxgExtractVlanHeader(adapter, RcvDataBufferHdr, Event) != 1405 if (SxgExtractVlanHeader(adapter, RcvDataBufferHdr, Event) !=
@@ -1411,9 +1412,9 @@ static struct sk_buff *sxg_slow_receive(p_adapter_t adapter, PSXG_EVENT Event)
1411 } 1412 }
1412 } 1413 }
1413#endif 1414#endif
1414 // 1415 /* */
1415 // Dumb-nic frame. See if it passes our mac filter and update stats 1416 /* Dumb-nic frame. See if it passes our mac filter and update stats */
1416 // 1417 /* */
1417 if (!sxg_mac_filter(adapter, (p_ether_header) 1418 if (!sxg_mac_filter(adapter, (p_ether_header)
1418 SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr), 1419 SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr),
1419 Event->Length)) { 1420 Event->Length)) {
@@ -1427,9 +1428,9 @@ static struct sk_buff *sxg_slow_receive(p_adapter_t adapter, PSXG_EVENT Event)
1427 1428
1428 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "DumbRcv", 1429 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "DumbRcv",
1429 RcvDataBufferHdr, Packet, Event->Length, 0); 1430 RcvDataBufferHdr, Packet, Event->Length, 0);
1430 // 1431 /* */
1431 // Lastly adjust the receive packet length. 1432 /* Lastly adjust the receive packet length. */
1432 // 1433 /* */
1433 SXG_ADJUST_RCV_PACKET(Packet, RcvDataBufferHdr, Event); 1434 SXG_ADJUST_RCV_PACKET(Packet, RcvDataBufferHdr, Event);
1434 1435
1435 return (Packet); 1436 return (Packet);
@@ -1541,7 +1542,7 @@ static bool sxg_mac_filter(p_adapter_t adapter, p_ether_header EtherHdr,
1541 1542
1542 if (SXG_MULTICAST_PACKET(EtherHdr)) { 1543 if (SXG_MULTICAST_PACKET(EtherHdr)) {
1543 if (SXG_BROADCAST_PACKET(EtherHdr)) { 1544 if (SXG_BROADCAST_PACKET(EtherHdr)) {
1544 // broadcast 1545 /* broadcast */
1545 if (adapter->MacFilter & MAC_BCAST) { 1546 if (adapter->MacFilter & MAC_BCAST) {
1546 adapter->Stats.DumbRcvBcastPkts++; 1547 adapter->Stats.DumbRcvBcastPkts++;
1547 adapter->Stats.DumbRcvBcastBytes += length; 1548 adapter->Stats.DumbRcvBcastBytes += length;
@@ -1550,7 +1551,7 @@ static bool sxg_mac_filter(p_adapter_t adapter, p_ether_header EtherHdr,
1550 return (TRUE); 1551 return (TRUE);
1551 } 1552 }
1552 } else { 1553 } else {
1553 // multicast 1554 /* multicast */
1554 if (adapter->MacFilter & MAC_ALLMCAST) { 1555 if (adapter->MacFilter & MAC_ALLMCAST) {
1555 adapter->Stats.DumbRcvMcastPkts++; 1556 adapter->Stats.DumbRcvMcastPkts++;
1556 adapter->Stats.DumbRcvMcastBytes += length; 1557 adapter->Stats.DumbRcvMcastBytes += length;
@@ -1580,9 +1581,9 @@ static bool sxg_mac_filter(p_adapter_t adapter, p_ether_header EtherHdr,
1580 } 1581 }
1581 } 1582 }
1582 } else if (adapter->MacFilter & MAC_DIRECTED) { 1583 } else if (adapter->MacFilter & MAC_DIRECTED) {
1583 // Not broadcast or multicast. Must be directed at us or 1584 /* Not broadcast or multicast. Must be directed at us or */
1584 // the card is in promiscuous mode. Either way, consider it 1585 /* the card is in promiscuous mode. Either way, consider it */
1585 // ours if MAC_DIRECTED is set 1586 /* ours if MAC_DIRECTED is set */
1586 adapter->Stats.DumbRcvUcastPkts++; 1587 adapter->Stats.DumbRcvUcastPkts++;
1587 adapter->Stats.DumbRcvUcastBytes += length; 1588 adapter->Stats.DumbRcvUcastBytes += length;
1588 adapter->Stats.DumbRcvPkts++; 1589 adapter->Stats.DumbRcvPkts++;
@@ -1590,7 +1591,7 @@ static bool sxg_mac_filter(p_adapter_t adapter, p_ether_header EtherHdr,
1590 return (TRUE); 1591 return (TRUE);
1591 } 1592 }
1592 if (adapter->MacFilter & MAC_PROMISC) { 1593 if (adapter->MacFilter & MAC_PROMISC) {
1593 // Whatever it is, keep it. 1594 /* Whatever it is, keep it. */
1594 adapter->Stats.DumbRcvPkts++; 1595 adapter->Stats.DumbRcvPkts++;
1595 adapter->Stats.DumbRcvBytes += length; 1596 adapter->Stats.DumbRcvBytes += length;
1596 return (TRUE); 1597 return (TRUE);
@@ -1606,7 +1607,7 @@ static int sxg_register_interrupt(p_adapter_t adapter)
1606 1607
1607 DBG_ERROR 1608 DBG_ERROR
1608 ("sxg: %s AllocAdaptRsrcs adapter[%p] dev->irq[%x] %x\n", 1609 ("sxg: %s AllocAdaptRsrcs adapter[%p] dev->irq[%x] %x\n",
1609 __FUNCTION__, adapter, adapter->netdev->irq, NR_IRQS); 1610 __func__, adapter, adapter->netdev->irq, NR_IRQS);
1610 1611
1611 spin_unlock_irqrestore(&sxg_global.driver_lock, 1612 spin_unlock_irqrestore(&sxg_global.driver_lock,
1612 sxg_global.flags); 1613 sxg_global.flags);
@@ -1625,18 +1626,18 @@ static int sxg_register_interrupt(p_adapter_t adapter)
1625 } 1626 }
1626 adapter->intrregistered = 1; 1627 adapter->intrregistered = 1;
1627 adapter->IntRegistered = TRUE; 1628 adapter->IntRegistered = TRUE;
1628 // Disable RSS with line-based interrupts 1629 /* Disable RSS with line-based interrupts */
1629 adapter->MsiEnabled = FALSE; 1630 adapter->MsiEnabled = FALSE;
1630 adapter->RssEnabled = FALSE; 1631 adapter->RssEnabled = FALSE;
1631 DBG_ERROR("sxg: %s AllocAdaptRsrcs adapter[%p] dev->irq[%x]\n", 1632 DBG_ERROR("sxg: %s AllocAdaptRsrcs adapter[%p] dev->irq[%x]\n",
1632 __FUNCTION__, adapter, adapter->netdev->irq); 1633 __func__, adapter, adapter->netdev->irq);
1633 } 1634 }
1634 return (STATUS_SUCCESS); 1635 return (STATUS_SUCCESS);
1635} 1636}
1636 1637
1637static void sxg_deregister_interrupt(p_adapter_t adapter) 1638static void sxg_deregister_interrupt(p_adapter_t adapter)
1638{ 1639{
1639 DBG_ERROR("sxg: %s ENTER adapter[%p]\n", __FUNCTION__, adapter); 1640 DBG_ERROR("sxg: %s ENTER adapter[%p]\n", __func__, adapter);
1640#if XXXTODO 1641#if XXXTODO
1641 slic_init_cleanup(adapter); 1642 slic_init_cleanup(adapter);
1642#endif 1643#endif
@@ -1651,7 +1652,7 @@ static void sxg_deregister_interrupt(p_adapter_t adapter)
1651 adapter->rcv_broadcasts = 0; 1652 adapter->rcv_broadcasts = 0;
1652 adapter->rcv_multicasts = 0; 1653 adapter->rcv_multicasts = 0;
1653 adapter->rcv_unicasts = 0; 1654 adapter->rcv_unicasts = 0;
1654 DBG_ERROR("sxg: %s EXIT\n", __FUNCTION__); 1655 DBG_ERROR("sxg: %s EXIT\n", __func__);
1655} 1656}
1656 1657
1657/* 1658/*
@@ -1666,7 +1667,7 @@ static int sxg_if_init(p_adapter_t adapter)
1666 int status = 0; 1667 int status = 0;
1667 1668
1668 DBG_ERROR("sxg: %s (%s) ENTER states[%d:%d:%d] flags[%x]\n", 1669 DBG_ERROR("sxg: %s (%s) ENTER states[%d:%d:%d] flags[%x]\n",
1669 __FUNCTION__, adapter->netdev->name, 1670 __func__, adapter->netdev->name,
1670 adapter->queues_initialized, adapter->state, 1671 adapter->queues_initialized, adapter->state,
1671 adapter->linkstate, dev->flags); 1672 adapter->linkstate, dev->flags);
1672 1673
@@ -1680,7 +1681,7 @@ static int sxg_if_init(p_adapter_t adapter)
1680 adapter->devflags_prev = dev->flags; 1681 adapter->devflags_prev = dev->flags;
1681 adapter->macopts = MAC_DIRECTED; 1682 adapter->macopts = MAC_DIRECTED;
1682 if (dev->flags) { 1683 if (dev->flags) {
1683 DBG_ERROR("sxg: %s (%s) Set MAC options: ", __FUNCTION__, 1684 DBG_ERROR("sxg: %s (%s) Set MAC options: ", __func__,
1684 adapter->netdev->name); 1685 adapter->netdev->name);
1685 if (dev->flags & IFF_BROADCAST) { 1686 if (dev->flags & IFF_BROADCAST) {
1686 adapter->macopts |= MAC_BCAST; 1687 adapter->macopts |= MAC_BCAST;
@@ -1713,7 +1714,7 @@ static int sxg_if_init(p_adapter_t adapter)
1713 /* 1714 /*
1714 * clear any pending events, then enable interrupts 1715 * clear any pending events, then enable interrupts
1715 */ 1716 */
1716 DBG_ERROR("sxg: %s ENABLE interrupts(slic)\n", __FUNCTION__); 1717 DBG_ERROR("sxg: %s ENABLE interrupts(slic)\n", __func__);
1717 1718
1718 return (STATUS_SUCCESS); 1719 return (STATUS_SUCCESS);
1719} 1720}
@@ -1724,11 +1725,11 @@ static int sxg_entry_open(p_net_device dev)
1724 int status; 1725 int status;
1725 1726
1726 ASSERT(adapter); 1727 ASSERT(adapter);
1727 DBG_ERROR("sxg: %s adapter->activated[%d]\n", __FUNCTION__, 1728 DBG_ERROR("sxg: %s adapter->activated[%d]\n", __func__,
1728 adapter->activated); 1729 adapter->activated);
1729 DBG_ERROR 1730 DBG_ERROR
1730 ("sxg: %s (%s): [jiffies[%lx] cpu %d] dev[%p] adapt[%p] port[%d]\n", 1731 ("sxg: %s (%s): [jiffies[%lx] cpu %d] dev[%p] adapt[%p] port[%d]\n",
1731 __FUNCTION__, adapter->netdev->name, jiffies, smp_processor_id(), 1732 __func__, adapter->netdev->name, jiffies, smp_processor_id(),
1732 adapter->netdev, adapter, adapter->port); 1733 adapter->netdev, adapter, adapter->port);
1733 1734
1734 netif_stop_queue(adapter->netdev); 1735 netif_stop_queue(adapter->netdev);
@@ -1738,16 +1739,16 @@ static int sxg_entry_open(p_net_device dev)
1738 sxg_global.num_sxg_ports_active++; 1739 sxg_global.num_sxg_ports_active++;
1739 adapter->activated = 1; 1740 adapter->activated = 1;
1740 } 1741 }
1741 // Initialize the adapter 1742 /* Initialize the adapter */
1742 DBG_ERROR("sxg: %s ENTER sxg_initialize_adapter\n", __FUNCTION__); 1743 DBG_ERROR("sxg: %s ENTER sxg_initialize_adapter\n", __func__);
1743 status = sxg_initialize_adapter(adapter); 1744 status = sxg_initialize_adapter(adapter);
1744 DBG_ERROR("sxg: %s EXIT sxg_initialize_adapter status[%x]\n", 1745 DBG_ERROR("sxg: %s EXIT sxg_initialize_adapter status[%x]\n",
1745 __FUNCTION__, status); 1746 __func__, status);
1746 1747
1747 if (status == STATUS_SUCCESS) { 1748 if (status == STATUS_SUCCESS) {
1748 DBG_ERROR("sxg: %s ENTER sxg_if_init\n", __FUNCTION__); 1749 DBG_ERROR("sxg: %s ENTER sxg_if_init\n", __func__);
1749 status = sxg_if_init(adapter); 1750 status = sxg_if_init(adapter);
1750 DBG_ERROR("sxg: %s EXIT sxg_if_init status[%x]\n", __FUNCTION__, 1751 DBG_ERROR("sxg: %s EXIT sxg_if_init status[%x]\n", __func__,
1751 status); 1752 status);
1752 } 1753 }
1753 1754
@@ -1760,12 +1761,12 @@ static int sxg_entry_open(p_net_device dev)
1760 sxg_global.flags); 1761 sxg_global.flags);
1761 return (status); 1762 return (status);
1762 } 1763 }
1763 DBG_ERROR("sxg: %s ENABLE ALL INTERRUPTS\n", __FUNCTION__); 1764 DBG_ERROR("sxg: %s ENABLE ALL INTERRUPTS\n", __func__);
1764 1765
1765 // Enable interrupts 1766 /* Enable interrupts */
1766 SXG_ENABLE_ALL_INTERRUPTS(adapter); 1767 SXG_ENABLE_ALL_INTERRUPTS(adapter);
1767 1768
1768 DBG_ERROR("sxg: %s EXIT\n", __FUNCTION__); 1769 DBG_ERROR("sxg: %s EXIT\n", __func__);
1769 1770
1770 spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags); 1771 spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags);
1771 return STATUS_SUCCESS; 1772 return STATUS_SUCCESS;
@@ -1779,27 +1780,27 @@ static void __devexit sxg_entry_remove(struct pci_dev *pcidev)
1779 p_adapter_t adapter = (p_adapter_t) netdev_priv(dev); 1780 p_adapter_t adapter = (p_adapter_t) netdev_priv(dev);
1780 1781
1781 ASSERT(adapter); 1782 ASSERT(adapter);
1782 DBG_ERROR("sxg: %s ENTER dev[%p] adapter[%p]\n", __FUNCTION__, dev, 1783 DBG_ERROR("sxg: %s ENTER dev[%p] adapter[%p]\n", __func__, dev,
1783 adapter); 1784 adapter);
1784 sxg_deregister_interrupt(adapter); 1785 sxg_deregister_interrupt(adapter);
1785 sxg_unmap_mmio_space(adapter); 1786 sxg_unmap_mmio_space(adapter);
1786 DBG_ERROR("sxg: %s unregister_netdev\n", __FUNCTION__); 1787 DBG_ERROR("sxg: %s unregister_netdev\n", __func__);
1787 unregister_netdev(dev); 1788 unregister_netdev(dev);
1788 1789
1789 mmio_start = pci_resource_start(pcidev, 0); 1790 mmio_start = pci_resource_start(pcidev, 0);
1790 mmio_len = pci_resource_len(pcidev, 0); 1791 mmio_len = pci_resource_len(pcidev, 0);
1791 1792
1792 DBG_ERROR("sxg: %s rel_region(0) start[%x] len[%x]\n", __FUNCTION__, 1793 DBG_ERROR("sxg: %s rel_region(0) start[%x] len[%x]\n", __func__,
1793 mmio_start, mmio_len); 1794 mmio_start, mmio_len);
1794 release_mem_region(mmio_start, mmio_len); 1795 release_mem_region(mmio_start, mmio_len);
1795 1796
1796 DBG_ERROR("sxg: %s iounmap dev->base_addr[%x]\n", __FUNCTION__, 1797 DBG_ERROR("sxg: %s iounmap dev->base_addr[%x]\n", __func__,
1797 (unsigned int)dev->base_addr); 1798 (unsigned int)dev->base_addr);
1798 iounmap((char *)dev->base_addr); 1799 iounmap((char *)dev->base_addr);
1799 1800
1800 DBG_ERROR("sxg: %s deallocate device\n", __FUNCTION__); 1801 DBG_ERROR("sxg: %s deallocate device\n", __func__);
1801 kfree(dev); 1802 kfree(dev);
1802 DBG_ERROR("sxg: %s EXIT\n", __FUNCTION__); 1803 DBG_ERROR("sxg: %s EXIT\n", __func__);
1803} 1804}
1804 1805
1805static int sxg_entry_halt(p_net_device dev) 1806static int sxg_entry_halt(p_net_device dev)
@@ -1807,17 +1808,17 @@ static int sxg_entry_halt(p_net_device dev)
1807 p_adapter_t adapter = (p_adapter_t) netdev_priv(dev); 1808 p_adapter_t adapter = (p_adapter_t) netdev_priv(dev);
1808 1809
1809 spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags); 1810 spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags);
1810 DBG_ERROR("sxg: %s (%s) ENTER\n", __FUNCTION__, dev->name); 1811 DBG_ERROR("sxg: %s (%s) ENTER\n", __func__, dev->name);
1811 1812
1812 netif_stop_queue(adapter->netdev); 1813 netif_stop_queue(adapter->netdev);
1813 adapter->state = ADAPT_DOWN; 1814 adapter->state = ADAPT_DOWN;
1814 adapter->linkstate = LINK_DOWN; 1815 adapter->linkstate = LINK_DOWN;
1815 adapter->devflags_prev = 0; 1816 adapter->devflags_prev = 0;
1816 DBG_ERROR("sxg: %s (%s) set adapter[%p] state to ADAPT_DOWN(%d)\n", 1817 DBG_ERROR("sxg: %s (%s) set adapter[%p] state to ADAPT_DOWN(%d)\n",
1817 __FUNCTION__, dev->name, adapter, adapter->state); 1818 __func__, dev->name, adapter, adapter->state);
1818 1819
1819 DBG_ERROR("sxg: %s (%s) EXIT\n", __FUNCTION__, dev->name); 1820 DBG_ERROR("sxg: %s (%s) EXIT\n", __func__, dev->name);
1820 DBG_ERROR("sxg: %s EXIT\n", __FUNCTION__); 1821 DBG_ERROR("sxg: %s EXIT\n", __func__);
1821 spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags); 1822 spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags);
1822 return (STATUS_SUCCESS); 1823 return (STATUS_SUCCESS);
1823} 1824}
@@ -1825,11 +1826,11 @@ static int sxg_entry_halt(p_net_device dev)
1825static int sxg_ioctl(p_net_device dev, struct ifreq *rq, int cmd) 1826static int sxg_ioctl(p_net_device dev, struct ifreq *rq, int cmd)
1826{ 1827{
1827 ASSERT(rq); 1828 ASSERT(rq);
1828// DBG_ERROR("sxg: %s cmd[%x] rq[%p] dev[%p]\n", __FUNCTION__, cmd, rq, dev); 1829/* DBG_ERROR("sxg: %s cmd[%x] rq[%p] dev[%p]\n", __func__, cmd, rq, dev); */
1829 switch (cmd) { 1830 switch (cmd) {
1830 case SIOCSLICSETINTAGG: 1831 case SIOCSLICSETINTAGG:
1831 { 1832 {
1832// p_adapter_t adapter = (p_adapter_t) netdev_priv(dev); 1833/* p_adapter_t adapter = (p_adapter_t) netdev_priv(dev); */
1833 u32 data[7]; 1834 u32 data[7];
1834 u32 intagg; 1835 u32 intagg;
1835 1836
@@ -1841,12 +1842,12 @@ static int sxg_ioctl(p_net_device dev, struct ifreq *rq, int cmd)
1841 intagg = data[0]; 1842 intagg = data[0];
1842 printk(KERN_EMERG 1843 printk(KERN_EMERG
1843 "%s: set interrupt aggregation to %d\n", 1844 "%s: set interrupt aggregation to %d\n",
1844 __FUNCTION__, intagg); 1845 __func__, intagg);
1845 return 0; 1846 return 0;
1846 } 1847 }
1847 1848
1848 default: 1849 default:
1849// DBG_ERROR("sxg: %s UNSUPPORTED[%x]\n", __FUNCTION__, cmd); 1850/* DBG_ERROR("sxg: %s UNSUPPORTED[%x]\n", __func__, cmd); */
1850 return -EOPNOTSUPP; 1851 return -EOPNOTSUPP;
1851 } 1852 }
1852 return 0; 1853 return 0;
@@ -1870,15 +1871,15 @@ static int sxg_send_packets(struct sk_buff *skb, p_net_device dev)
1870 p_adapter_t adapter = (p_adapter_t) netdev_priv(dev); 1871 p_adapter_t adapter = (p_adapter_t) netdev_priv(dev);
1871 u32 status = STATUS_SUCCESS; 1872 u32 status = STATUS_SUCCESS;
1872 1873
1873 DBG_ERROR("sxg: %s ENTER sxg_send_packets skb[%p]\n", __FUNCTION__, 1874 DBG_ERROR("sxg: %s ENTER sxg_send_packets skb[%p]\n", __func__,
1874 skb); 1875 skb);
1875 // Check the adapter state 1876 /* Check the adapter state */
1876 switch (adapter->State) { 1877 switch (adapter->State) {
1877 case SXG_STATE_INITIALIZING: 1878 case SXG_STATE_INITIALIZING:
1878 case SXG_STATE_HALTED: 1879 case SXG_STATE_HALTED:
1879 case SXG_STATE_SHUTDOWN: 1880 case SXG_STATE_SHUTDOWN:
1880 ASSERT(0); // unexpected 1881 ASSERT(0); /* unexpected */
1881 // fall through 1882 /* fall through */
1882 case SXG_STATE_RESETTING: 1883 case SXG_STATE_RESETTING:
1883 case SXG_STATE_SLEEP: 1884 case SXG_STATE_SLEEP:
1884 case SXG_STATE_BOOTDIAG: 1885 case SXG_STATE_BOOTDIAG:
@@ -1898,23 +1899,23 @@ static int sxg_send_packets(struct sk_buff *skb, p_net_device dev)
1898 if (status != STATUS_SUCCESS) { 1899 if (status != STATUS_SUCCESS) {
1899 goto xmit_fail; 1900 goto xmit_fail;
1900 } 1901 }
1901 // send a packet 1902 /* send a packet */
1902 status = sxg_transmit_packet(adapter, skb); 1903 status = sxg_transmit_packet(adapter, skb);
1903 if (status == STATUS_SUCCESS) { 1904 if (status == STATUS_SUCCESS) {
1904 goto xmit_done; 1905 goto xmit_done;
1905 } 1906 }
1906 1907
1907 xmit_fail: 1908 xmit_fail:
1908 // reject & complete all the packets if they cant be sent 1909 /* reject & complete all the packets if they cant be sent */
1909 if (status != STATUS_SUCCESS) { 1910 if (status != STATUS_SUCCESS) {
1910#if XXXTODO 1911#if XXXTODO
1911// sxg_send_packets_fail(adapter, skb, status); 1912/* sxg_send_packets_fail(adapter, skb, status); */
1912#else 1913#else
1913 SXG_DROP_DUMB_SEND(adapter, skb); 1914 SXG_DROP_DUMB_SEND(adapter, skb);
1914 adapter->stats.tx_dropped++; 1915 adapter->stats.tx_dropped++;
1915#endif 1916#endif
1916 } 1917 }
1917 DBG_ERROR("sxg: %s EXIT sxg_send_packets status[%x]\n", __FUNCTION__, 1918 DBG_ERROR("sxg: %s EXIT sxg_send_packets status[%x]\n", __func__,
1918 status); 1919 status);
1919 1920
1920 xmit_done: 1921 xmit_done:
@@ -1940,12 +1941,12 @@ static int sxg_transmit_packet(p_adapter_t adapter, struct sk_buff *skb)
1940 void *SglBuffer; 1941 void *SglBuffer;
1941 u32 SglBufferLength; 1942 u32 SglBufferLength;
1942 1943
1943 // The vast majority of work is done in the shared 1944 /* The vast majority of work is done in the shared */
1944 // sxg_dumb_sgl routine. 1945 /* sxg_dumb_sgl routine. */
1945 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbSend", 1946 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbSend",
1946 adapter, skb, 0, 0); 1947 adapter, skb, 0, 0);
1947 1948
1948 // Allocate a SGL buffer 1949 /* Allocate a SGL buffer */
1949 SXG_GET_SGL_BUFFER(adapter, SxgSgl); 1950 SXG_GET_SGL_BUFFER(adapter, SxgSgl);
1950 if (!SxgSgl) { 1951 if (!SxgSgl) {
1951 adapter->Stats.NoSglBuf++; 1952 adapter->Stats.NoSglBuf++;
@@ -1963,9 +1964,9 @@ static int sxg_transmit_packet(p_adapter_t adapter, struct sk_buff *skb)
1963 SxgSgl->DumbPacket = skb; 1964 SxgSgl->DumbPacket = skb;
1964 pSgl = NULL; 1965 pSgl = NULL;
1965 1966
1966 // Call the common sxg_dumb_sgl routine to complete the send. 1967 /* Call the common sxg_dumb_sgl routine to complete the send. */
1967 sxg_dumb_sgl(pSgl, SxgSgl); 1968 sxg_dumb_sgl(pSgl, SxgSgl);
1968 // Return success sxg_dumb_sgl (or something later) will complete it. 1969 /* Return success sxg_dumb_sgl (or something later) will complete it. */
1969 return (STATUS_SUCCESS); 1970 return (STATUS_SUCCESS);
1970} 1971}
1971 1972
@@ -1983,39 +1984,39 @@ static void sxg_dumb_sgl(PSCATTER_GATHER_LIST pSgl, PSXG_SCATTER_GATHER SxgSgl)
1983{ 1984{
1984 p_adapter_t adapter = SxgSgl->adapter; 1985 p_adapter_t adapter = SxgSgl->adapter;
1985 struct sk_buff *skb = SxgSgl->DumbPacket; 1986 struct sk_buff *skb = SxgSgl->DumbPacket;
1986 // For now, all dumb-nic sends go on RSS queue zero 1987 /* For now, all dumb-nic sends go on RSS queue zero */
1987 PSXG_XMT_RING XmtRing = &adapter->XmtRings[0]; 1988 PSXG_XMT_RING XmtRing = &adapter->XmtRings[0];
1988 PSXG_RING_INFO XmtRingInfo = &adapter->XmtRingZeroInfo; 1989 PSXG_RING_INFO XmtRingInfo = &adapter->XmtRingZeroInfo;
1989 PSXG_CMD XmtCmd = NULL; 1990 PSXG_CMD XmtCmd = NULL;
1990// u32 Index = 0; 1991/* u32 Index = 0; */
1991 u32 DataLength = skb->len; 1992 u32 DataLength = skb->len;
1992// unsigned int BufLen; 1993/* unsigned int BufLen; */
1993// u32 SglOffset; 1994/* u32 SglOffset; */
1994 u64 phys_addr; 1995 u64 phys_addr;
1995 1996
1996 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbSgl", 1997 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbSgl",
1997 pSgl, SxgSgl, 0, 0); 1998 pSgl, SxgSgl, 0, 0);
1998 1999
1999 // Set aside a pointer to the sgl 2000 /* Set aside a pointer to the sgl */
2000 SxgSgl->pSgl = pSgl; 2001 SxgSgl->pSgl = pSgl;
2001 2002
2002 // Sanity check that our SGL format is as we expect. 2003 /* Sanity check that our SGL format is as we expect. */
2003 ASSERT(sizeof(SXG_X64_SGE) == sizeof(SCATTER_GATHER_ELEMENT)); 2004 ASSERT(sizeof(SXG_X64_SGE) == sizeof(SCATTER_GATHER_ELEMENT));
2004 // Shouldn't be a vlan tag on this frame 2005 /* Shouldn't be a vlan tag on this frame */
2005 ASSERT(SxgSgl->VlanTag.VlanTci == 0); 2006 ASSERT(SxgSgl->VlanTag.VlanTci == 0);
2006 ASSERT(SxgSgl->VlanTag.VlanTpid == 0); 2007 ASSERT(SxgSgl->VlanTag.VlanTpid == 0);
2007 2008
2008 // From here below we work with the SGL placed in our 2009 /* From here below we work with the SGL placed in our */
2009 // buffer. 2010 /* buffer. */
2010 2011
2011 SxgSgl->Sgl.NumberOfElements = 1; 2012 SxgSgl->Sgl.NumberOfElements = 1;
2012 2013
2013 // Grab the spinlock and acquire a command 2014 /* Grab the spinlock and acquire a command */
2014 spin_lock(&adapter->XmtZeroLock); 2015 spin_lock(&adapter->XmtZeroLock);
2015 SXG_GET_CMD(XmtRing, XmtRingInfo, XmtCmd, SxgSgl); 2016 SXG_GET_CMD(XmtRing, XmtRingInfo, XmtCmd, SxgSgl);
2016 if (XmtCmd == NULL) { 2017 if (XmtCmd == NULL) {
2017 // Call sxg_complete_slow_send to see if we can 2018 /* Call sxg_complete_slow_send to see if we can */
2018 // free up any XmtRingZero entries and then try again 2019 /* free up any XmtRingZero entries and then try again */
2019 spin_unlock(&adapter->XmtZeroLock); 2020 spin_unlock(&adapter->XmtZeroLock);
2020 sxg_complete_slow_send(adapter); 2021 sxg_complete_slow_send(adapter);
2021 spin_lock(&adapter->XmtZeroLock); 2022 spin_lock(&adapter->XmtZeroLock);
@@ -2027,10 +2028,10 @@ static void sxg_dumb_sgl(PSCATTER_GATHER_LIST pSgl, PSXG_SCATTER_GATHER SxgSgl)
2027 } 2028 }
2028 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbCmd", 2029 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbCmd",
2029 XmtCmd, XmtRingInfo->Head, XmtRingInfo->Tail, 0); 2030 XmtCmd, XmtRingInfo->Head, XmtRingInfo->Tail, 0);
2030 // Update stats 2031 /* Update stats */
2031 adapter->Stats.DumbXmtPkts++; 2032 adapter->Stats.DumbXmtPkts++;
2032 adapter->Stats.DumbXmtBytes += DataLength; 2033 adapter->Stats.DumbXmtBytes += DataLength;
2033#if XXXTODO // Stats stuff 2034#if XXXTODO /* Stats stuff */
2034 if (SXG_MULTICAST_PACKET(EtherHdr)) { 2035 if (SXG_MULTICAST_PACKET(EtherHdr)) {
2035 if (SXG_BROADCAST_PACKET(EtherHdr)) { 2036 if (SXG_BROADCAST_PACKET(EtherHdr)) {
2036 adapter->Stats.DumbXmtBcastPkts++; 2037 adapter->Stats.DumbXmtBcastPkts++;
@@ -2044,8 +2045,8 @@ static void sxg_dumb_sgl(PSCATTER_GATHER_LIST pSgl, PSXG_SCATTER_GATHER SxgSgl)
2044 adapter->Stats.DumbXmtUcastBytes += DataLength; 2045 adapter->Stats.DumbXmtUcastBytes += DataLength;
2045 } 2046 }
2046#endif 2047#endif
2047 // Fill in the command 2048 /* Fill in the command */
2048 // Copy out the first SGE to the command and adjust for offset 2049 /* Copy out the first SGE to the command and adjust for offset */
2049 phys_addr = 2050 phys_addr =
2050 pci_map_single(adapter->pcidev, skb->data, skb->len, 2051 pci_map_single(adapter->pcidev, skb->data, skb->len,
2051 PCI_DMA_TODEVICE); 2052 PCI_DMA_TODEVICE);
@@ -2053,54 +2054,54 @@ static void sxg_dumb_sgl(PSCATTER_GATHER_LIST pSgl, PSXG_SCATTER_GATHER SxgSgl)
2053 XmtCmd->Buffer.FirstSgeAddress = XmtCmd->Buffer.FirstSgeAddress << 32; 2054 XmtCmd->Buffer.FirstSgeAddress = XmtCmd->Buffer.FirstSgeAddress << 32;
2054 XmtCmd->Buffer.FirstSgeAddress = 2055 XmtCmd->Buffer.FirstSgeAddress =
2055 XmtCmd->Buffer.FirstSgeAddress | SXG_GET_ADDR_LOW(phys_addr); 2056 XmtCmd->Buffer.FirstSgeAddress | SXG_GET_ADDR_LOW(phys_addr);
2056// XmtCmd->Buffer.FirstSgeAddress = SxgSgl->Sgl.Elements[Index].Address; 2057/* XmtCmd->Buffer.FirstSgeAddress = SxgSgl->Sgl.Elements[Index].Address; */
2057// XmtCmd->Buffer.FirstSgeAddress.LowPart += MdlOffset; 2058/* XmtCmd->Buffer.FirstSgeAddress.LowPart += MdlOffset; */
2058 XmtCmd->Buffer.FirstSgeLength = DataLength; 2059 XmtCmd->Buffer.FirstSgeLength = DataLength;
2059 // Set a pointer to the remaining SGL entries 2060 /* Set a pointer to the remaining SGL entries */
2060// XmtCmd->Sgl = SxgSgl->PhysicalAddress; 2061/* XmtCmd->Sgl = SxgSgl->PhysicalAddress; */
2061 // Advance the physical address of the SxgSgl structure to 2062 /* Advance the physical address of the SxgSgl structure to */
2062 // the second SGE 2063 /* the second SGE */
2063// SglOffset = (u32)((u32 *)(&SxgSgl->Sgl.Elements[Index+1]) - 2064/* SglOffset = (u32)((u32 *)(&SxgSgl->Sgl.Elements[Index+1]) - */
2064// (u32 *)SxgSgl); 2065/* (u32 *)SxgSgl); */
2065// XmtCmd->Sgl.LowPart += SglOffset; 2066/* XmtCmd->Sgl.LowPart += SglOffset; */
2066 XmtCmd->Buffer.SgeOffset = 0; 2067 XmtCmd->Buffer.SgeOffset = 0;
2067 // Note - TotalLength might be overwritten with MSS below.. 2068 /* Note - TotalLength might be overwritten with MSS below.. */
2068 XmtCmd->Buffer.TotalLength = DataLength; 2069 XmtCmd->Buffer.TotalLength = DataLength;
2069 XmtCmd->SgEntries = 1; //(ushort)(SxgSgl->Sgl.NumberOfElements - Index); 2070 XmtCmd->SgEntries = 1; /*(ushort)(SxgSgl->Sgl.NumberOfElements - Index); */
2070 XmtCmd->Flags = 0; 2071 XmtCmd->Flags = 0;
2071 // 2072 /* */
2072 // Advance transmit cmd descripter by 1. 2073 /* Advance transmit cmd descripter by 1. */
2073 // NOTE - See comments in SxgTcpOutput where we write 2074 /* NOTE - See comments in SxgTcpOutput where we write */
2074 // to the XmtCmd register regarding CPU ID values and/or 2075 /* to the XmtCmd register regarding CPU ID values and/or */
2075 // multiple commands. 2076 /* multiple commands. */
2076 // 2077 /* */
2077 // 2078 /* */
2078 WRITE_REG(adapter->UcodeRegs[0].XmtCmd, 1, TRUE); 2079 WRITE_REG(adapter->UcodeRegs[0].XmtCmd, 1, TRUE);
2079 // 2080 /* */
2080 // 2081 /* */
2081 adapter->Stats.XmtQLen++; // Stats within lock 2082 adapter->Stats.XmtQLen++; /* Stats within lock */
2082 spin_unlock(&adapter->XmtZeroLock); 2083 spin_unlock(&adapter->XmtZeroLock);
2083 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDumSgl2", 2084 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDumSgl2",
2084 XmtCmd, pSgl, SxgSgl, 0); 2085 XmtCmd, pSgl, SxgSgl, 0);
2085 return; 2086 return;
2086 2087
2087 abortcmd: 2088 abortcmd:
2088 // NOTE - Only jump to this label AFTER grabbing the 2089 /* NOTE - Only jump to this label AFTER grabbing the */
2089 // XmtZeroLock, and DO NOT DROP IT between the 2090 /* XmtZeroLock, and DO NOT DROP IT between the */
2090 // command allocation and the following abort. 2091 /* command allocation and the following abort. */
2091 if (XmtCmd) { 2092 if (XmtCmd) {
2092 SXG_ABORT_CMD(XmtRingInfo); 2093 SXG_ABORT_CMD(XmtRingInfo);
2093 } 2094 }
2094 spin_unlock(&adapter->XmtZeroLock); 2095 spin_unlock(&adapter->XmtZeroLock);
2095 2096
2096// failsgl: 2097/* failsgl: */
2097 // Jump to this label if failure occurs before the 2098 /* Jump to this label if failure occurs before the */
2098 // XmtZeroLock is grabbed 2099 /* XmtZeroLock is grabbed */
2099 adapter->Stats.XmtErrors++; 2100 adapter->Stats.XmtErrors++;
2100 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "DumSGFal", 2101 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "DumSGFal",
2101 pSgl, SxgSgl, XmtRingInfo->Head, XmtRingInfo->Tail); 2102 pSgl, SxgSgl, XmtRingInfo->Head, XmtRingInfo->Tail);
2102 2103
2103 SXG_COMPLETE_DUMB_SEND(adapter, SxgSgl->DumbPacket); // SxgSgl->DumbPacket is the skb 2104 SXG_COMPLETE_DUMB_SEND(adapter, SxgSgl->DumbPacket); /* SxgSgl->DumbPacket is the skb */
2104} 2105}
2105 2106
2106/*************************************************************** 2107/***************************************************************
@@ -2127,122 +2128,122 @@ static int sxg_initialize_link(p_adapter_t adapter)
2127 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "InitLink", 2128 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "InitLink",
2128 adapter, 0, 0, 0); 2129 adapter, 0, 0, 0);
2129 2130
2130 // Reset PHY and XGXS module 2131 /* Reset PHY and XGXS module */
2131 WRITE_REG(HwRegs->LinkStatus, LS_SERDES_POWER_DOWN, TRUE); 2132 WRITE_REG(HwRegs->LinkStatus, LS_SERDES_POWER_DOWN, TRUE);
2132 2133
2133 // Reset transmit configuration register 2134 /* Reset transmit configuration register */
2134 WRITE_REG(HwRegs->XmtConfig, XMT_CONFIG_RESET, TRUE); 2135 WRITE_REG(HwRegs->XmtConfig, XMT_CONFIG_RESET, TRUE);
2135 2136
2136 // Reset receive configuration register 2137 /* Reset receive configuration register */
2137 WRITE_REG(HwRegs->RcvConfig, RCV_CONFIG_RESET, TRUE); 2138 WRITE_REG(HwRegs->RcvConfig, RCV_CONFIG_RESET, TRUE);
2138 2139
2139 // Reset all MAC modules 2140 /* Reset all MAC modules */
2140 WRITE_REG(HwRegs->MacConfig0, AXGMAC_CFG0_SUB_RESET, TRUE); 2141 WRITE_REG(HwRegs->MacConfig0, AXGMAC_CFG0_SUB_RESET, TRUE);
2141 2142
2142 // Link address 0 2143 /* Link address 0 */
2143 // XXXTODO - This assumes the MAC address (0a:0b:0c:0d:0e:0f) 2144 /* XXXTODO - This assumes the MAC address (0a:0b:0c:0d:0e:0f) */
2144 // is stored with the first nibble (0a) in the byte 0 2145 /* is stored with the first nibble (0a) in the byte 0 */
2145 // of the Mac address. Possibly reverse? 2146 /* of the Mac address. Possibly reverse? */
2146 Value = *(u32 *) adapter->MacAddr; 2147 Value = *(u32 *) adapter->MacAddr;
2147 WRITE_REG(HwRegs->LinkAddress0Low, Value, TRUE); 2148 WRITE_REG(HwRegs->LinkAddress0Low, Value, TRUE);
2148 // also write the MAC address to the MAC. Endian is reversed. 2149 /* also write the MAC address to the MAC. Endian is reversed. */
2149 WRITE_REG(HwRegs->MacAddressLow, ntohl(Value), TRUE); 2150 WRITE_REG(HwRegs->MacAddressLow, ntohl(Value), TRUE);
2150 Value = (*(u16 *) & adapter->MacAddr[4] & 0x0000FFFF); 2151 Value = (*(u16 *) & adapter->MacAddr[4] & 0x0000FFFF);
2151 WRITE_REG(HwRegs->LinkAddress0High, Value | LINK_ADDRESS_ENABLE, TRUE); 2152 WRITE_REG(HwRegs->LinkAddress0High, Value | LINK_ADDRESS_ENABLE, TRUE);
2152 // endian swap for the MAC (put high bytes in bits [31:16], swapped) 2153 /* endian swap for the MAC (put high bytes in bits [31:16], swapped) */
2153 Value = ntohl(Value); 2154 Value = ntohl(Value);
2154 WRITE_REG(HwRegs->MacAddressHigh, Value, TRUE); 2155 WRITE_REG(HwRegs->MacAddressHigh, Value, TRUE);
2155 // Link address 1 2156 /* Link address 1 */
2156 WRITE_REG(HwRegs->LinkAddress1Low, 0, TRUE); 2157 WRITE_REG(HwRegs->LinkAddress1Low, 0, TRUE);
2157 WRITE_REG(HwRegs->LinkAddress1High, 0, TRUE); 2158 WRITE_REG(HwRegs->LinkAddress1High, 0, TRUE);
2158 // Link address 2 2159 /* Link address 2 */
2159 WRITE_REG(HwRegs->LinkAddress2Low, 0, TRUE); 2160 WRITE_REG(HwRegs->LinkAddress2Low, 0, TRUE);
2160 WRITE_REG(HwRegs->LinkAddress2High, 0, TRUE); 2161 WRITE_REG(HwRegs->LinkAddress2High, 0, TRUE);
2161 // Link address 3 2162 /* Link address 3 */
2162 WRITE_REG(HwRegs->LinkAddress3Low, 0, TRUE); 2163 WRITE_REG(HwRegs->LinkAddress3Low, 0, TRUE);
2163 WRITE_REG(HwRegs->LinkAddress3High, 0, TRUE); 2164 WRITE_REG(HwRegs->LinkAddress3High, 0, TRUE);
2164 2165
2165 // Enable MAC modules 2166 /* Enable MAC modules */
2166 WRITE_REG(HwRegs->MacConfig0, 0, TRUE); 2167 WRITE_REG(HwRegs->MacConfig0, 0, TRUE);
2167 2168
2168 // Configure MAC 2169 /* Configure MAC */
2169 WRITE_REG(HwRegs->MacConfig1, (AXGMAC_CFG1_XMT_PAUSE | // Allow sending of pause 2170 WRITE_REG(HwRegs->MacConfig1, (AXGMAC_CFG1_XMT_PAUSE | /* Allow sending of pause */
2170 AXGMAC_CFG1_XMT_EN | // Enable XMT 2171 AXGMAC_CFG1_XMT_EN | /* Enable XMT */
2171 AXGMAC_CFG1_RCV_PAUSE | // Enable detection of pause 2172 AXGMAC_CFG1_RCV_PAUSE | /* Enable detection of pause */
2172 AXGMAC_CFG1_RCV_EN | // Enable receive 2173 AXGMAC_CFG1_RCV_EN | /* Enable receive */
2173 AXGMAC_CFG1_SHORT_ASSERT | // short frame detection 2174 AXGMAC_CFG1_SHORT_ASSERT | /* short frame detection */
2174 AXGMAC_CFG1_CHECK_LEN | // Verify frame length 2175 AXGMAC_CFG1_CHECK_LEN | /* Verify frame length */
2175 AXGMAC_CFG1_GEN_FCS | // Generate FCS 2176 AXGMAC_CFG1_GEN_FCS | /* Generate FCS */
2176 AXGMAC_CFG1_PAD_64), // Pad frames to 64 bytes 2177 AXGMAC_CFG1_PAD_64), /* Pad frames to 64 bytes */
2177 TRUE); 2178 TRUE);
2178 2179
2179 // Set AXGMAC max frame length if jumbo. Not needed for standard MTU 2180 /* Set AXGMAC max frame length if jumbo. Not needed for standard MTU */
2180 if (adapter->JumboEnabled) { 2181 if (adapter->JumboEnabled) {
2181 WRITE_REG(HwRegs->MacMaxFrameLen, AXGMAC_MAXFRAME_JUMBO, TRUE); 2182 WRITE_REG(HwRegs->MacMaxFrameLen, AXGMAC_MAXFRAME_JUMBO, TRUE);
2182 } 2183 }
2183 // AMIIM Configuration Register - 2184 /* AMIIM Configuration Register - */
2184 // The value placed in the AXGMAC_AMIIM_CFG_HALF_CLOCK portion 2185 /* The value placed in the AXGMAC_AMIIM_CFG_HALF_CLOCK portion */
2185 // (bottom bits) of this register is used to determine the 2186 /* (bottom bits) of this register is used to determine the */
2186 // MDC frequency as specified in the A-XGMAC Design Document. 2187 /* MDC frequency as specified in the A-XGMAC Design Document. */
2187 // This value must not be zero. The following value (62 or 0x3E) 2188 /* This value must not be zero. The following value (62 or 0x3E) */
2188 // is based on our MAC transmit clock frequency (MTCLK) of 312.5 MHz. 2189 /* is based on our MAC transmit clock frequency (MTCLK) of 312.5 MHz. */
2189 // Given a maximum MDIO clock frequency of 2.5 MHz (see the PHY spec), 2190 /* Given a maximum MDIO clock frequency of 2.5 MHz (see the PHY spec), */
2190 // we get: 312.5/(2*(X+1)) < 2.5 ==> X = 62. 2191 /* we get: 312.5/(2*(X+1)) < 2.5 ==> X = 62. */
2191 // This value happens to be the default value for this register, 2192 /* This value happens to be the default value for this register, */
2192 // so we really don't have to do this. 2193 /* so we really don't have to do this. */
2193 WRITE_REG(HwRegs->MacAmiimConfig, 0x0000003E, TRUE); 2194 WRITE_REG(HwRegs->MacAmiimConfig, 0x0000003E, TRUE);
2194 2195
2195 // Power up and enable PHY and XAUI/XGXS/Serdes logic 2196 /* Power up and enable PHY and XAUI/XGXS/Serdes logic */
2196 WRITE_REG(HwRegs->LinkStatus, 2197 WRITE_REG(HwRegs->LinkStatus,
2197 (LS_PHY_CLR_RESET | 2198 (LS_PHY_CLR_RESET |
2198 LS_XGXS_ENABLE | 2199 LS_XGXS_ENABLE |
2199 LS_XGXS_CTL | LS_PHY_CLK_EN | LS_ATTN_ALARM), TRUE); 2200 LS_XGXS_CTL | LS_PHY_CLK_EN | LS_ATTN_ALARM), TRUE);
2200 DBG_ERROR("After Power Up and enable PHY in sxg_initialize_link\n"); 2201 DBG_ERROR("After Power Up and enable PHY in sxg_initialize_link\n");
2201 2202
2202 // Per information given by Aeluros, wait 100 ms after removing reset. 2203 /* Per information given by Aeluros, wait 100 ms after removing reset. */
2203 // It's not enough to wait for the self-clearing reset bit in reg 0 to clear. 2204 /* It's not enough to wait for the self-clearing reset bit in reg 0 to clear. */
2204 mdelay(100); 2205 mdelay(100);
2205 2206
2206 // Verify the PHY has come up by checking that the Reset bit has cleared. 2207 /* Verify the PHY has come up by checking that the Reset bit has cleared. */
2207 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, // PHY PMA/PMD module 2208 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, /* PHY PMA/PMD module */
2208 PHY_PMA_CONTROL1, // PMA/PMD control register 2209 PHY_PMA_CONTROL1, /* PMA/PMD control register */
2209 &Value); 2210 &Value);
2210 if (status != STATUS_SUCCESS) 2211 if (status != STATUS_SUCCESS)
2211 return (STATUS_FAILURE); 2212 return (STATUS_FAILURE);
2212 if (Value & PMA_CONTROL1_RESET) // reset complete if bit is 0 2213 if (Value & PMA_CONTROL1_RESET) /* reset complete if bit is 0 */
2213 return (STATUS_FAILURE); 2214 return (STATUS_FAILURE);
2214 2215
2215 // The SERDES should be initialized by now - confirm 2216 /* The SERDES should be initialized by now - confirm */
2216 READ_REG(HwRegs->LinkStatus, Value); 2217 READ_REG(HwRegs->LinkStatus, Value);
2217 if (Value & LS_SERDES_DOWN) // verify SERDES is initialized 2218 if (Value & LS_SERDES_DOWN) /* verify SERDES is initialized */
2218 return (STATUS_FAILURE); 2219 return (STATUS_FAILURE);
2219 2220
2220 // The XAUI link should also be up - confirm 2221 /* The XAUI link should also be up - confirm */
2221 if (!(Value & LS_XAUI_LINK_UP)) // verify XAUI link is up 2222 if (!(Value & LS_XAUI_LINK_UP)) /* verify XAUI link is up */
2222 return (STATUS_FAILURE); 2223 return (STATUS_FAILURE);
2223 2224
2224 // Initialize the PHY 2225 /* Initialize the PHY */
2225 status = sxg_phy_init(adapter); 2226 status = sxg_phy_init(adapter);
2226 if (status != STATUS_SUCCESS) 2227 if (status != STATUS_SUCCESS)
2227 return (STATUS_FAILURE); 2228 return (STATUS_FAILURE);
2228 2229
2229 // Enable the Link Alarm 2230 /* Enable the Link Alarm */
2230 status = sxg_write_mdio_reg(adapter, MIIM_DEV_PHY_PMA, // PHY PMA/PMD module 2231 status = sxg_write_mdio_reg(adapter, MIIM_DEV_PHY_PMA, /* PHY PMA/PMD module */
2231 LASI_CONTROL, // LASI control register 2232 LASI_CONTROL, /* LASI control register */
2232 LASI_CTL_LS_ALARM_ENABLE); // enable link alarm bit 2233 LASI_CTL_LS_ALARM_ENABLE); /* enable link alarm bit */
2233 if (status != STATUS_SUCCESS) 2234 if (status != STATUS_SUCCESS)
2234 return (STATUS_FAILURE); 2235 return (STATUS_FAILURE);
2235 2236
2236 // XXXTODO - temporary - verify bit is set 2237 /* XXXTODO - temporary - verify bit is set */
2237 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, // PHY PMA/PMD module 2238 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, /* PHY PMA/PMD module */
2238 LASI_CONTROL, // LASI control register 2239 LASI_CONTROL, /* LASI control register */
2239 &Value); 2240 &Value);
2240 if (status != STATUS_SUCCESS) 2241 if (status != STATUS_SUCCESS)
2241 return (STATUS_FAILURE); 2242 return (STATUS_FAILURE);
2242 if (!(Value & LASI_CTL_LS_ALARM_ENABLE)) { 2243 if (!(Value & LASI_CTL_LS_ALARM_ENABLE)) {
2243 DBG_ERROR("Error! LASI Control Alarm Enable bit not set!\n"); 2244 DBG_ERROR("Error! LASI Control Alarm Enable bit not set!\n");
2244 } 2245 }
2245 // Enable receive 2246 /* Enable receive */
2246 MaxFrame = adapter->JumboEnabled ? JUMBOMAXFRAME : ETHERMAXFRAME; 2247 MaxFrame = adapter->JumboEnabled ? JUMBOMAXFRAME : ETHERMAXFRAME;
2247 ConfigData = (RCV_CONFIG_ENABLE | 2248 ConfigData = (RCV_CONFIG_ENABLE |
2248 RCV_CONFIG_ENPARSE | 2249 RCV_CONFIG_ENPARSE |
@@ -2256,7 +2257,7 @@ static int sxg_initialize_link(p_adapter_t adapter)
2256 2257
2257 WRITE_REG(HwRegs->XmtConfig, XMT_CONFIG_ENABLE, TRUE); 2258 WRITE_REG(HwRegs->XmtConfig, XMT_CONFIG_ENABLE, TRUE);
2258 2259
2259 // Mark the link as down. We'll get a link event when it comes up. 2260 /* Mark the link as down. We'll get a link event when it comes up. */
2260 sxg_link_state(adapter, SXG_LINK_DOWN); 2261 sxg_link_state(adapter, SXG_LINK_DOWN);
2261 2262
2262 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XInitLnk", 2263 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XInitLnk",
@@ -2279,35 +2280,35 @@ static int sxg_phy_init(p_adapter_t adapter)
2279 PPHY_UCODE p; 2280 PPHY_UCODE p;
2280 int status; 2281 int status;
2281 2282
2282 DBG_ERROR("ENTER %s\n", __FUNCTION__); 2283 DBG_ERROR("ENTER %s\n", __func__);
2283 2284
2284 // Read a register to identify the PHY type 2285 /* Read a register to identify the PHY type */
2285 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, // PHY PMA/PMD module 2286 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, /* PHY PMA/PMD module */
2286 0xC205, // PHY ID register (?) 2287 0xC205, /* PHY ID register (?) */
2287 &Value); // XXXTODO - add def 2288 &Value); /* XXXTODO - add def */
2288 if (status != STATUS_SUCCESS) 2289 if (status != STATUS_SUCCESS)
2289 return (STATUS_FAILURE); 2290 return (STATUS_FAILURE);
2290 2291
2291 if (Value == 0x0012) { // 0x0012 == AEL2005C PHY(?) - XXXTODO - add def 2292 if (Value == 0x0012) { /* 0x0012 == AEL2005C PHY(?) - XXXTODO - add def */
2292 DBG_ERROR 2293 DBG_ERROR
2293 ("AEL2005C PHY detected. Downloading PHY microcode.\n"); 2294 ("AEL2005C PHY detected. Downloading PHY microcode.\n");
2294 2295
2295 // Initialize AEL2005C PHY and download PHY microcode 2296 /* Initialize AEL2005C PHY and download PHY microcode */
2296 for (p = PhyUcode; p->Addr != 0xFFFF; p++) { 2297 for (p = PhyUcode; p->Addr != 0xFFFF; p++) {
2297 if (p->Addr == 0) { 2298 if (p->Addr == 0) {
2298 // if address == 0, data == sleep time in ms 2299 /* if address == 0, data == sleep time in ms */
2299 mdelay(p->Data); 2300 mdelay(p->Data);
2300 } else { 2301 } else {
2301 // write the given data to the specified address 2302 /* write the given data to the specified address */
2302 status = sxg_write_mdio_reg(adapter, MIIM_DEV_PHY_PMA, // PHY PMA/PMD module 2303 status = sxg_write_mdio_reg(adapter, MIIM_DEV_PHY_PMA, /* PHY PMA/PMD module */
2303 p->Addr, // PHY address 2304 p->Addr, /* PHY address */
2304 p->Data); // PHY data 2305 p->Data); /* PHY data */
2305 if (status != STATUS_SUCCESS) 2306 if (status != STATUS_SUCCESS)
2306 return (STATUS_FAILURE); 2307 return (STATUS_FAILURE);
2307 } 2308 }
2308 } 2309 }
2309 } 2310 }
2310 DBG_ERROR("EXIT %s\n", __FUNCTION__); 2311 DBG_ERROR("EXIT %s\n", __func__);
2311 2312
2312 return (STATUS_SUCCESS); 2313 return (STATUS_SUCCESS);
2313} 2314}
@@ -2330,42 +2331,42 @@ static void sxg_link_event(p_adapter_t adapter)
2330 2331
2331 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "LinkEvnt", 2332 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "LinkEvnt",
2332 adapter, 0, 0, 0); 2333 adapter, 0, 0, 0);
2333 DBG_ERROR("ENTER %s\n", __FUNCTION__); 2334 DBG_ERROR("ENTER %s\n", __func__);
2334 2335
2335 // Check the Link Status register. We should have a Link Alarm. 2336 /* Check the Link Status register. We should have a Link Alarm. */
2336 READ_REG(HwRegs->LinkStatus, Value); 2337 READ_REG(HwRegs->LinkStatus, Value);
2337 if (Value & LS_LINK_ALARM) { 2338 if (Value & LS_LINK_ALARM) {
2338 // We got a Link Status alarm. First, pause to let the 2339 /* We got a Link Status alarm. First, pause to let the */
2339 // link state settle (it can bounce a number of times) 2340 /* link state settle (it can bounce a number of times) */
2340 mdelay(10); 2341 mdelay(10);
2341 2342
2342 // Now clear the alarm by reading the LASI status register. 2343 /* Now clear the alarm by reading the LASI status register. */
2343 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, // PHY PMA/PMD module 2344 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, /* PHY PMA/PMD module */
2344 LASI_STATUS, // LASI status register 2345 LASI_STATUS, /* LASI status register */
2345 &Value); 2346 &Value);
2346 if (status != STATUS_SUCCESS) { 2347 if (status != STATUS_SUCCESS) {
2347 DBG_ERROR("Error reading LASI Status MDIO register!\n"); 2348 DBG_ERROR("Error reading LASI Status MDIO register!\n");
2348 sxg_link_state(adapter, SXG_LINK_DOWN); 2349 sxg_link_state(adapter, SXG_LINK_DOWN);
2349// ASSERT(0); 2350/* ASSERT(0); */
2350 } 2351 }
2351 ASSERT(Value & LASI_STATUS_LS_ALARM); 2352 ASSERT(Value & LASI_STATUS_LS_ALARM);
2352 2353
2353 // Now get and set the link state 2354 /* Now get and set the link state */
2354 LinkState = sxg_get_link_state(adapter); 2355 LinkState = sxg_get_link_state(adapter);
2355 sxg_link_state(adapter, LinkState); 2356 sxg_link_state(adapter, LinkState);
2356 DBG_ERROR("SXG: Link Alarm occurred. Link is %s\n", 2357 DBG_ERROR("SXG: Link Alarm occurred. Link is %s\n",
2357 ((LinkState == SXG_LINK_UP) ? "UP" : "DOWN")); 2358 ((LinkState == SXG_LINK_UP) ? "UP" : "DOWN"));
2358 } else { 2359 } else {
2359 // XXXTODO - Assuming Link Attention is only being generated for the 2360 /* XXXTODO - Assuming Link Attention is only being generated for the */
2360 // Link Alarm pin (and not for a XAUI Link Status change), then it's 2361 /* Link Alarm pin (and not for a XAUI Link Status change), then it's */
2361 // impossible to get here. Yet we've gotten here twice (under extreme 2362 /* impossible to get here. Yet we've gotten here twice (under extreme */
2362 // conditions - bouncing the link up and down many times a second). 2363 /* conditions - bouncing the link up and down many times a second). */
2363 // Needs further investigation. 2364 /* Needs further investigation. */
2364 DBG_ERROR("SXG: sxg_link_event: Can't get here!\n"); 2365 DBG_ERROR("SXG: sxg_link_event: Can't get here!\n");
2365 DBG_ERROR("SXG: Link Status == 0x%08X.\n", Value); 2366 DBG_ERROR("SXG: Link Status == 0x%08X.\n", Value);
2366// ASSERT(0); 2367/* ASSERT(0); */
2367 } 2368 }
2368 DBG_ERROR("EXIT %s\n", __FUNCTION__); 2369 DBG_ERROR("EXIT %s\n", __func__);
2369 2370
2370} 2371}
2371 2372
@@ -2383,50 +2384,50 @@ static SXG_LINK_STATE sxg_get_link_state(p_adapter_t adapter)
2383 int status; 2384 int status;
2384 u32 Value; 2385 u32 Value;
2385 2386
2386 DBG_ERROR("ENTER %s\n", __FUNCTION__); 2387 DBG_ERROR("ENTER %s\n", __func__);
2387 2388
2388 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "GetLink", 2389 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "GetLink",
2389 adapter, 0, 0, 0); 2390 adapter, 0, 0, 0);
2390 2391
2391 // Per the Xenpak spec (and the IEEE 10Gb spec?), the link is up if 2392 /* Per the Xenpak spec (and the IEEE 10Gb spec?), the link is up if */
2392 // the following 3 bits (from 3 different MDIO registers) are all true. 2393 /* the following 3 bits (from 3 different MDIO registers) are all true. */
2393 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, // PHY PMA/PMD module 2394 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, /* PHY PMA/PMD module */
2394 PHY_PMA_RCV_DET, // PMA/PMD Receive Signal Detect register 2395 PHY_PMA_RCV_DET, /* PMA/PMD Receive Signal Detect register */
2395 &Value); 2396 &Value);
2396 if (status != STATUS_SUCCESS) 2397 if (status != STATUS_SUCCESS)
2397 goto bad; 2398 goto bad;
2398 2399
2399 // If PMA/PMD receive signal detect is 0, then the link is down 2400 /* If PMA/PMD receive signal detect is 0, then the link is down */
2400 if (!(Value & PMA_RCV_DETECT)) 2401 if (!(Value & PMA_RCV_DETECT))
2401 return (SXG_LINK_DOWN); 2402 return (SXG_LINK_DOWN);
2402 2403
2403 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PCS, // PHY PCS module 2404 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PCS, /* PHY PCS module */
2404 PHY_PCS_10G_STATUS1, // PCS 10GBASE-R Status 1 register 2405 PHY_PCS_10G_STATUS1, /* PCS 10GBASE-R Status 1 register */
2405 &Value); 2406 &Value);
2406 if (status != STATUS_SUCCESS) 2407 if (status != STATUS_SUCCESS)
2407 goto bad; 2408 goto bad;
2408 2409
2409 // If PCS is not locked to receive blocks, then the link is down 2410 /* If PCS is not locked to receive blocks, then the link is down */
2410 if (!(Value & PCS_10B_BLOCK_LOCK)) 2411 if (!(Value & PCS_10B_BLOCK_LOCK))
2411 return (SXG_LINK_DOWN); 2412 return (SXG_LINK_DOWN);
2412 2413
2413 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_XS, // PHY XS module 2414 status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_XS, /* PHY XS module */
2414 PHY_XS_LANE_STATUS, // XS Lane Status register 2415 PHY_XS_LANE_STATUS, /* XS Lane Status register */
2415 &Value); 2416 &Value);
2416 if (status != STATUS_SUCCESS) 2417 if (status != STATUS_SUCCESS)
2417 goto bad; 2418 goto bad;
2418 2419
2419 // If XS transmit lanes are not aligned, then the link is down 2420 /* If XS transmit lanes are not aligned, then the link is down */
2420 if (!(Value & XS_LANE_ALIGN)) 2421 if (!(Value & XS_LANE_ALIGN))
2421 return (SXG_LINK_DOWN); 2422 return (SXG_LINK_DOWN);
2422 2423
2423 // All 3 bits are true, so the link is up 2424 /* All 3 bits are true, so the link is up */
2424 DBG_ERROR("EXIT %s\n", __FUNCTION__); 2425 DBG_ERROR("EXIT %s\n", __func__);
2425 2426
2426 return (SXG_LINK_UP); 2427 return (SXG_LINK_UP);
2427 2428
2428 bad: 2429 bad:
2429 // An error occurred reading an MDIO register. This shouldn't happen. 2430 /* An error occurred reading an MDIO register. This shouldn't happen. */
2430 DBG_ERROR("Error reading an MDIO register!\n"); 2431 DBG_ERROR("Error reading an MDIO register!\n");
2431 ASSERT(0); 2432 ASSERT(0);
2432 return (SXG_LINK_DOWN); 2433 return (SXG_LINK_DOWN);
@@ -2437,11 +2438,11 @@ static void sxg_indicate_link_state(p_adapter_t adapter,
2437{ 2438{
2438 if (adapter->LinkState == SXG_LINK_UP) { 2439 if (adapter->LinkState == SXG_LINK_UP) {
2439 DBG_ERROR("%s: LINK now UP, call netif_start_queue\n", 2440 DBG_ERROR("%s: LINK now UP, call netif_start_queue\n",
2440 __FUNCTION__); 2441 __func__);
2441 netif_start_queue(adapter->netdev); 2442 netif_start_queue(adapter->netdev);
2442 } else { 2443 } else {
2443 DBG_ERROR("%s: LINK now DOWN, call netif_stop_queue\n", 2444 DBG_ERROR("%s: LINK now DOWN, call netif_stop_queue\n",
2444 __FUNCTION__); 2445 __func__);
2445 netif_stop_queue(adapter->netdev); 2446 netif_stop_queue(adapter->netdev);
2446 } 2447 }
2447} 2448}
@@ -2464,23 +2465,23 @@ static void sxg_link_state(p_adapter_t adapter, SXG_LINK_STATE LinkState)
2464 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "LnkINDCT", 2465 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "LnkINDCT",
2465 adapter, LinkState, adapter->LinkState, adapter->State); 2466 adapter, LinkState, adapter->LinkState, adapter->State);
2466 2467
2467 DBG_ERROR("ENTER %s\n", __FUNCTION__); 2468 DBG_ERROR("ENTER %s\n", __func__);
2468 2469
2469 // Hold the adapter lock during this routine. Maybe move 2470 /* Hold the adapter lock during this routine. Maybe move */
2470 // the lock to the caller. 2471 /* the lock to the caller. */
2471 spin_lock(&adapter->AdapterLock); 2472 spin_lock(&adapter->AdapterLock);
2472 if (LinkState == adapter->LinkState) { 2473 if (LinkState == adapter->LinkState) {
2473 // Nothing changed.. 2474 /* Nothing changed.. */
2474 spin_unlock(&adapter->AdapterLock); 2475 spin_unlock(&adapter->AdapterLock);
2475 DBG_ERROR("EXIT #0 %s\n", __FUNCTION__); 2476 DBG_ERROR("EXIT #0 %s\n", __func__);
2476 return; 2477 return;
2477 } 2478 }
2478 // Save the adapter state 2479 /* Save the adapter state */
2479 adapter->LinkState = LinkState; 2480 adapter->LinkState = LinkState;
2480 2481
2481 // Drop the lock and indicate link state 2482 /* Drop the lock and indicate link state */
2482 spin_unlock(&adapter->AdapterLock); 2483 spin_unlock(&adapter->AdapterLock);
2483 DBG_ERROR("EXIT #1 %s\n", __FUNCTION__); 2484 DBG_ERROR("EXIT #1 %s\n", __func__);
2484 2485
2485 sxg_indicate_link_state(adapter, LinkState); 2486 sxg_indicate_link_state(adapter, LinkState);
2486} 2487}
@@ -2501,76 +2502,76 @@ static int sxg_write_mdio_reg(p_adapter_t adapter,
2501 u32 DevAddr, u32 RegAddr, u32 Value) 2502 u32 DevAddr, u32 RegAddr, u32 Value)
2502{ 2503{
2503 PSXG_HW_REGS HwRegs = adapter->HwRegs; 2504 PSXG_HW_REGS HwRegs = adapter->HwRegs;
2504 u32 AddrOp; // Address operation (written to MIIM field reg) 2505 u32 AddrOp; /* Address operation (written to MIIM field reg) */
2505 u32 WriteOp; // Write operation (written to MIIM field reg) 2506 u32 WriteOp; /* Write operation (written to MIIM field reg) */
2506 u32 Cmd; // Command (written to MIIM command reg) 2507 u32 Cmd; /* Command (written to MIIM command reg) */
2507 u32 ValueRead; 2508 u32 ValueRead;
2508 u32 Timeout; 2509 u32 Timeout;
2509 2510
2510// DBG_ERROR("ENTER %s\n", __FUNCTION__); 2511/* DBG_ERROR("ENTER %s\n", __func__); */
2511 2512
2512 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "WrtMDIO", 2513 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "WrtMDIO",
2513 adapter, 0, 0, 0); 2514 adapter, 0, 0, 0);
2514 2515
2515 // Ensure values don't exceed field width 2516 /* Ensure values don't exceed field width */
2516 DevAddr &= 0x001F; // 5-bit field 2517 DevAddr &= 0x001F; /* 5-bit field */
2517 RegAddr &= 0xFFFF; // 16-bit field 2518 RegAddr &= 0xFFFF; /* 16-bit field */
2518 Value &= 0xFFFF; // 16-bit field 2519 Value &= 0xFFFF; /* 16-bit field */
2519 2520
2520 // Set MIIM field register bits for an MIIM address operation 2521 /* Set MIIM field register bits for an MIIM address operation */
2521 AddrOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) | 2522 AddrOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
2522 (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) | 2523 (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
2523 (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) | 2524 (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
2524 (MIIM_OP_ADDR << AXGMAC_AMIIM_FIELD_OP_SHIFT) | RegAddr; 2525 (MIIM_OP_ADDR << AXGMAC_AMIIM_FIELD_OP_SHIFT) | RegAddr;
2525 2526
2526 // Set MIIM field register bits for an MIIM write operation 2527 /* Set MIIM field register bits for an MIIM write operation */
2527 WriteOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) | 2528 WriteOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
2528 (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) | 2529 (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
2529 (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) | 2530 (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
2530 (MIIM_OP_WRITE << AXGMAC_AMIIM_FIELD_OP_SHIFT) | Value; 2531 (MIIM_OP_WRITE << AXGMAC_AMIIM_FIELD_OP_SHIFT) | Value;
2531 2532
2532 // Set MIIM command register bits to execute an MIIM command 2533 /* Set MIIM command register bits to execute an MIIM command */
2533 Cmd = AXGMAC_AMIIM_CMD_START | AXGMAC_AMIIM_CMD_10G_OPERATION; 2534 Cmd = AXGMAC_AMIIM_CMD_START | AXGMAC_AMIIM_CMD_10G_OPERATION;
2534 2535
2535 // Reset the command register command bit (in case it's not 0) 2536 /* Reset the command register command bit (in case it's not 0) */
2536 WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE); 2537 WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
2537 2538
2538 // MIIM write to set the address of the specified MDIO register 2539 /* MIIM write to set the address of the specified MDIO register */
2539 WRITE_REG(HwRegs->MacAmiimField, AddrOp, TRUE); 2540 WRITE_REG(HwRegs->MacAmiimField, AddrOp, TRUE);
2540 2541
2541 // Write to MIIM Command Register to execute to address operation 2542 /* Write to MIIM Command Register to execute to address operation */
2542 WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE); 2543 WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
2543 2544
2544 // Poll AMIIM Indicator register to wait for completion 2545 /* Poll AMIIM Indicator register to wait for completion */
2545 Timeout = SXG_LINK_TIMEOUT; 2546 Timeout = SXG_LINK_TIMEOUT;
2546 do { 2547 do {
2547 udelay(100); // Timeout in 100us units 2548 udelay(100); /* Timeout in 100us units */
2548 READ_REG(HwRegs->MacAmiimIndicator, ValueRead); 2549 READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
2549 if (--Timeout == 0) { 2550 if (--Timeout == 0) {
2550 return (STATUS_FAILURE); 2551 return (STATUS_FAILURE);
2551 } 2552 }
2552 } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY); 2553 } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
2553 2554
2554 // Reset the command register command bit 2555 /* Reset the command register command bit */
2555 WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE); 2556 WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
2556 2557
2557 // MIIM write to set up an MDIO write operation 2558 /* MIIM write to set up an MDIO write operation */
2558 WRITE_REG(HwRegs->MacAmiimField, WriteOp, TRUE); 2559 WRITE_REG(HwRegs->MacAmiimField, WriteOp, TRUE);
2559 2560
2560 // Write to MIIM Command Register to execute the write operation 2561 /* Write to MIIM Command Register to execute the write operation */
2561 WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE); 2562 WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
2562 2563
2563 // Poll AMIIM Indicator register to wait for completion 2564 /* Poll AMIIM Indicator register to wait for completion */
2564 Timeout = SXG_LINK_TIMEOUT; 2565 Timeout = SXG_LINK_TIMEOUT;
2565 do { 2566 do {
2566 udelay(100); // Timeout in 100us units 2567 udelay(100); /* Timeout in 100us units */
2567 READ_REG(HwRegs->MacAmiimIndicator, ValueRead); 2568 READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
2568 if (--Timeout == 0) { 2569 if (--Timeout == 0) {
2569 return (STATUS_FAILURE); 2570 return (STATUS_FAILURE);
2570 } 2571 }
2571 } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY); 2572 } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
2572 2573
2573// DBG_ERROR("EXIT %s\n", __FUNCTION__); 2574/* DBG_ERROR("EXIT %s\n", __func__); */
2574 2575
2575 return (STATUS_SUCCESS); 2576 return (STATUS_SUCCESS);
2576} 2577}
@@ -2591,110 +2592,78 @@ static int sxg_read_mdio_reg(p_adapter_t adapter,
2591 u32 DevAddr, u32 RegAddr, u32 *pValue) 2592 u32 DevAddr, u32 RegAddr, u32 *pValue)
2592{ 2593{
2593 PSXG_HW_REGS HwRegs = adapter->HwRegs; 2594 PSXG_HW_REGS HwRegs = adapter->HwRegs;
2594 u32 AddrOp; // Address operation (written to MIIM field reg) 2595 u32 AddrOp; /* Address operation (written to MIIM field reg) */
2595 u32 ReadOp; // Read operation (written to MIIM field reg) 2596 u32 ReadOp; /* Read operation (written to MIIM field reg) */
2596 u32 Cmd; // Command (written to MIIM command reg) 2597 u32 Cmd; /* Command (written to MIIM command reg) */
2597 u32 ValueRead; 2598 u32 ValueRead;
2598 u32 Timeout; 2599 u32 Timeout;
2599 2600
2600 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "WrtMDIO", 2601 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "WrtMDIO",
2601 adapter, 0, 0, 0); 2602 adapter, 0, 0, 0);
2602// DBG_ERROR("ENTER %s\n", __FUNCTION__); 2603/* DBG_ERROR("ENTER %s\n", __func__); */
2603 2604
2604 // Ensure values don't exceed field width 2605 /* Ensure values don't exceed field width */
2605 DevAddr &= 0x001F; // 5-bit field 2606 DevAddr &= 0x001F; /* 5-bit field */
2606 RegAddr &= 0xFFFF; // 16-bit field 2607 RegAddr &= 0xFFFF; /* 16-bit field */
2607 2608
2608 // Set MIIM field register bits for an MIIM address operation 2609 /* Set MIIM field register bits for an MIIM address operation */
2609 AddrOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) | 2610 AddrOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
2610 (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) | 2611 (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
2611 (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) | 2612 (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
2612 (MIIM_OP_ADDR << AXGMAC_AMIIM_FIELD_OP_SHIFT) | RegAddr; 2613 (MIIM_OP_ADDR << AXGMAC_AMIIM_FIELD_OP_SHIFT) | RegAddr;
2613 2614
2614 // Set MIIM field register bits for an MIIM read operation 2615 /* Set MIIM field register bits for an MIIM read operation */
2615 ReadOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) | 2616 ReadOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
2616 (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) | 2617 (DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
2617 (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) | 2618 (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
2618 (MIIM_OP_READ << AXGMAC_AMIIM_FIELD_OP_SHIFT); 2619 (MIIM_OP_READ << AXGMAC_AMIIM_FIELD_OP_SHIFT);
2619 2620
2620 // Set MIIM command register bits to execute an MIIM command 2621 /* Set MIIM command register bits to execute an MIIM command */
2621 Cmd = AXGMAC_AMIIM_CMD_START | AXGMAC_AMIIM_CMD_10G_OPERATION; 2622 Cmd = AXGMAC_AMIIM_CMD_START | AXGMAC_AMIIM_CMD_10G_OPERATION;
2622 2623
2623 // Reset the command register command bit (in case it's not 0) 2624 /* Reset the command register command bit (in case it's not 0) */
2624 WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE); 2625 WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
2625 2626
2626 // MIIM write to set the address of the specified MDIO register 2627 /* MIIM write to set the address of the specified MDIO register */
2627 WRITE_REG(HwRegs->MacAmiimField, AddrOp, TRUE); 2628 WRITE_REG(HwRegs->MacAmiimField, AddrOp, TRUE);
2628 2629
2629 // Write to MIIM Command Register to execute to address operation 2630 /* Write to MIIM Command Register to execute to address operation */
2630 WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE); 2631 WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
2631 2632
2632 // Poll AMIIM Indicator register to wait for completion 2633 /* Poll AMIIM Indicator register to wait for completion */
2633 Timeout = SXG_LINK_TIMEOUT; 2634 Timeout = SXG_LINK_TIMEOUT;
2634 do { 2635 do {
2635 udelay(100); // Timeout in 100us units 2636 udelay(100); /* Timeout in 100us units */
2636 READ_REG(HwRegs->MacAmiimIndicator, ValueRead); 2637 READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
2637 if (--Timeout == 0) { 2638 if (--Timeout == 0) {
2638 return (STATUS_FAILURE); 2639 return (STATUS_FAILURE);
2639 } 2640 }
2640 } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY); 2641 } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
2641 2642
2642 // Reset the command register command bit 2643 /* Reset the command register command bit */
2643 WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE); 2644 WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
2644 2645
2645 // MIIM write to set up an MDIO register read operation 2646 /* MIIM write to set up an MDIO register read operation */
2646 WRITE_REG(HwRegs->MacAmiimField, ReadOp, TRUE); 2647 WRITE_REG(HwRegs->MacAmiimField, ReadOp, TRUE);
2647 2648
2648 // Write to MIIM Command Register to execute the read operation 2649 /* Write to MIIM Command Register to execute the read operation */
2649 WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE); 2650 WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
2650 2651
2651 // Poll AMIIM Indicator register to wait for completion 2652 /* Poll AMIIM Indicator register to wait for completion */
2652 Timeout = SXG_LINK_TIMEOUT; 2653 Timeout = SXG_LINK_TIMEOUT;
2653 do { 2654 do {
2654 udelay(100); // Timeout in 100us units 2655 udelay(100); /* Timeout in 100us units */
2655 READ_REG(HwRegs->MacAmiimIndicator, ValueRead); 2656 READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
2656 if (--Timeout == 0) { 2657 if (--Timeout == 0) {
2657 return (STATUS_FAILURE); 2658 return (STATUS_FAILURE);
2658 } 2659 }
2659 } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY); 2660 } while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
2660 2661
2661 // Read the MDIO register data back from the field register 2662 /* Read the MDIO register data back from the field register */
2662 READ_REG(HwRegs->MacAmiimField, *pValue); 2663 READ_REG(HwRegs->MacAmiimField, *pValue);
2663 *pValue &= 0xFFFF; // data is in the lower 16 bits 2664 *pValue &= 0xFFFF; /* data is in the lower 16 bits */
2664 2665
2665// DBG_ERROR("EXIT %s\n", __FUNCTION__); 2666/* DBG_ERROR("EXIT %s\n", __func__); */
2666
2667 return (STATUS_SUCCESS);
2668}
2669
2670/*
2671 * Allocate a mcast_address structure to hold the multicast address.
2672 * Link it in.
2673 */
2674static int sxg_mcast_add_list(p_adapter_t adapter, char *address)
2675{
2676 p_mcast_address_t mcaddr, mlist;
2677 bool equaladdr;
2678
2679 /* Check to see if it already exists */
2680 mlist = adapter->mcastaddrs;
2681 while (mlist) {
2682 ETHER_EQ_ADDR(mlist->address, address, equaladdr);
2683 if (equaladdr) {
2684 return (STATUS_SUCCESS);
2685 }
2686 mlist = mlist->next;
2687 }
2688
2689 /* Doesn't already exist. Allocate a structure to hold it */
2690 mcaddr = kmalloc(sizeof(mcast_address_t), GFP_ATOMIC);
2691 if (mcaddr == NULL)
2692 return 1;
2693
2694 memcpy(mcaddr->address, address, 6);
2695
2696 mcaddr->next = adapter->mcastaddrs;
2697 adapter->mcastaddrs = mcaddr;
2698 2667
2699 return (STATUS_SUCCESS); 2668 return (STATUS_SUCCESS);
2700} 2669}
@@ -2710,7 +2679,6 @@ static int sxg_mcast_add_list(p_adapter_t adapter, char *address)
2710 * 2679 *
2711 */ 2680 */
2712static u32 sxg_crc_table[256]; /* Table of CRC's for all possible byte values */ 2681static u32 sxg_crc_table[256]; /* Table of CRC's for all possible byte values */
2713static u32 sxg_crc_init; /* Is table initialized */
2714 2682
2715/* 2683/*
2716 * Contruct the CRC32 table 2684 * Contruct the CRC32 table
@@ -2737,6 +2705,8 @@ static void sxg_mcast_init_crc32(void)
2737 } 2705 }
2738} 2706}
2739 2707
2708#if XXXTODO
2709static u32 sxg_crc_init; /* Is table initialized */
2740/* 2710/*
2741 * Return the MAC hast as described above. 2711 * Return the MAC hast as described above.
2742 */ 2712 */
@@ -2765,6 +2735,74 @@ static unsigned char sxg_mcast_get_mac_hash(char *macaddr)
2765 return (machash); 2735 return (machash);
2766} 2736}
2767 2737
2738static void sxg_mcast_set_mask(p_adapter_t adapter)
2739{
2740 PSXG_UCODE_REGS sxg_regs = adapter->UcodeRegs;
2741
2742 DBG_ERROR("%s ENTER (%s) macopts[%x] mask[%llx]\n", __func__,
2743 adapter->netdev->name, (unsigned int)adapter->MacFilter,
2744 adapter->MulticastMask);
2745
2746 if (adapter->MacFilter & (MAC_ALLMCAST | MAC_PROMISC)) {
2747 /* Turn on all multicast addresses. We have to do this for promiscuous
2748 * mode as well as ALLMCAST mode. It saves the Microcode from having
2749 * to keep state about the MAC configuration.
2750 */
2751/* DBG_ERROR("sxg: %s macopts = MAC_ALLMCAST | MAC_PROMISC\n SLUT MODE!!!\n",__func__); */
2752 WRITE_REG(sxg_regs->McastLow, 0xFFFFFFFF, FLUSH);
2753 WRITE_REG(sxg_regs->McastHigh, 0xFFFFFFFF, FLUSH);
2754/* DBG_ERROR("%s (%s) WRITE to slic_regs slic_mcastlow&high 0xFFFFFFFF\n",__func__, adapter->netdev->name); */
2755
2756 } else {
2757 /* Commit our multicast mast to the SLIC by writing to the multicast
2758 * address mask registers
2759 */
2760 DBG_ERROR("%s (%s) WRITE mcastlow[%lx] mcasthigh[%lx]\n",
2761 __func__, adapter->netdev->name,
2762 ((ulong) (adapter->MulticastMask & 0xFFFFFFFF)),
2763 ((ulong)
2764 ((adapter->MulticastMask >> 32) & 0xFFFFFFFF)));
2765
2766 WRITE_REG(sxg_regs->McastLow,
2767 (u32) (adapter->MulticastMask & 0xFFFFFFFF), FLUSH);
2768 WRITE_REG(sxg_regs->McastHigh,
2769 (u32) ((adapter->
2770 MulticastMask >> 32) & 0xFFFFFFFF), FLUSH);
2771 }
2772}
2773
2774/*
2775 * Allocate a mcast_address structure to hold the multicast address.
2776 * Link it in.
2777 */
2778static int sxg_mcast_add_list(p_adapter_t adapter, char *address)
2779{
2780 p_mcast_address_t mcaddr, mlist;
2781 bool equaladdr;
2782
2783 /* Check to see if it already exists */
2784 mlist = adapter->mcastaddrs;
2785 while (mlist) {
2786 ETHER_EQ_ADDR(mlist->address, address, equaladdr);
2787 if (equaladdr) {
2788 return (STATUS_SUCCESS);
2789 }
2790 mlist = mlist->next;
2791 }
2792
2793 /* Doesn't already exist. Allocate a structure to hold it */
2794 mcaddr = kmalloc(sizeof(mcast_address_t), GFP_ATOMIC);
2795 if (mcaddr == NULL)
2796 return 1;
2797
2798 memcpy(mcaddr->address, address, 6);
2799
2800 mcaddr->next = adapter->mcastaddrs;
2801 adapter->mcastaddrs = mcaddr;
2802
2803 return (STATUS_SUCCESS);
2804}
2805
2768static void sxg_mcast_set_bit(p_adapter_t adapter, char *address) 2806static void sxg_mcast_set_bit(p_adapter_t adapter, char *address)
2769{ 2807{
2770 unsigned char crcpoly; 2808 unsigned char crcpoly;
@@ -2783,7 +2821,6 @@ static void sxg_mcast_set_bit(p_adapter_t adapter, char *address)
2783 2821
2784static void sxg_mcast_set_list(p_net_device dev) 2822static void sxg_mcast_set_list(p_net_device dev)
2785{ 2823{
2786#if XXXTODO
2787 p_adapter_t adapter = (p_adapter_t) netdev_priv(dev); 2824 p_adapter_t adapter = (p_adapter_t) netdev_priv(dev);
2788 int status = STATUS_SUCCESS; 2825 int status = STATUS_SUCCESS;
2789 int i; 2826 int i;
@@ -2809,7 +2846,7 @@ static void sxg_mcast_set_list(p_net_device dev)
2809 } 2846 }
2810 2847
2811 DBG_ERROR("%s a->devflags_prev[%x] dev->flags[%x] status[%x]\n", 2848 DBG_ERROR("%s a->devflags_prev[%x] dev->flags[%x] status[%x]\n",
2812 __FUNCTION__, adapter->devflags_prev, dev->flags, status); 2849 __func__, adapter->devflags_prev, dev->flags, status);
2813 if (adapter->devflags_prev != dev->flags) { 2850 if (adapter->devflags_prev != dev->flags) {
2814 adapter->macopts = MAC_DIRECTED; 2851 adapter->macopts = MAC_DIRECTED;
2815 if (dev->flags) { 2852 if (dev->flags) {
@@ -2828,60 +2865,24 @@ static void sxg_mcast_set_list(p_net_device dev)
2828 } 2865 }
2829 adapter->devflags_prev = dev->flags; 2866 adapter->devflags_prev = dev->flags;
2830 DBG_ERROR("%s call sxg_config_set adapter->macopts[%x]\n", 2867 DBG_ERROR("%s call sxg_config_set adapter->macopts[%x]\n",
2831 __FUNCTION__, adapter->macopts); 2868 __func__, adapter->macopts);
2832 sxg_config_set(adapter, TRUE); 2869 sxg_config_set(adapter, TRUE);
2833 } else { 2870 } else {
2834 if (status == STATUS_SUCCESS) { 2871 if (status == STATUS_SUCCESS) {
2835 sxg_mcast_set_mask(adapter); 2872 sxg_mcast_set_mask(adapter);
2836 } 2873 }
2837 } 2874 }
2838#endif
2839 return; 2875 return;
2840} 2876}
2841 2877#endif
2842static void sxg_mcast_set_mask(p_adapter_t adapter)
2843{
2844 PSXG_UCODE_REGS sxg_regs = adapter->UcodeRegs;
2845
2846 DBG_ERROR("%s ENTER (%s) macopts[%x] mask[%llx]\n", __FUNCTION__,
2847 adapter->netdev->name, (unsigned int)adapter->MacFilter,
2848 adapter->MulticastMask);
2849
2850 if (adapter->MacFilter & (MAC_ALLMCAST | MAC_PROMISC)) {
2851 /* Turn on all multicast addresses. We have to do this for promiscuous
2852 * mode as well as ALLMCAST mode. It saves the Microcode from having
2853 * to keep state about the MAC configuration.
2854 */
2855// DBG_ERROR("sxg: %s macopts = MAC_ALLMCAST | MAC_PROMISC\n SLUT MODE!!!\n",__FUNCTION__);
2856 WRITE_REG(sxg_regs->McastLow, 0xFFFFFFFF, FLUSH);
2857 WRITE_REG(sxg_regs->McastHigh, 0xFFFFFFFF, FLUSH);
2858// DBG_ERROR("%s (%s) WRITE to slic_regs slic_mcastlow&high 0xFFFFFFFF\n",__FUNCTION__, adapter->netdev->name);
2859
2860 } else {
2861 /* Commit our multicast mast to the SLIC by writing to the multicast
2862 * address mask registers
2863 */
2864 DBG_ERROR("%s (%s) WRITE mcastlow[%lx] mcasthigh[%lx]\n",
2865 __FUNCTION__, adapter->netdev->name,
2866 ((ulong) (adapter->MulticastMask & 0xFFFFFFFF)),
2867 ((ulong)
2868 ((adapter->MulticastMask >> 32) & 0xFFFFFFFF)));
2869
2870 WRITE_REG(sxg_regs->McastLow,
2871 (u32) (adapter->MulticastMask & 0xFFFFFFFF), FLUSH);
2872 WRITE_REG(sxg_regs->McastHigh,
2873 (u32) ((adapter->
2874 MulticastMask >> 32) & 0xFFFFFFFF), FLUSH);
2875 }
2876}
2877 2878
2878static void sxg_unmap_mmio_space(p_adapter_t adapter) 2879static void sxg_unmap_mmio_space(p_adapter_t adapter)
2879{ 2880{
2880#if LINUX_FREES_ADAPTER_RESOURCES 2881#if LINUX_FREES_ADAPTER_RESOURCES
2881// if (adapter->Regs) { 2882/* if (adapter->Regs) { */
2882// iounmap(adapter->Regs); 2883/* iounmap(adapter->Regs); */
2883// } 2884/* } */
2884// adapter->slic_regs = NULL; 2885/* adapter->slic_regs = NULL; */
2885#endif 2886#endif
2886} 2887}
2887 2888
@@ -2909,8 +2910,8 @@ void SxgFreeResources(p_adapter_t adapter)
2909 IsrCount = adapter->MsiEnabled ? RssIds : 1; 2910 IsrCount = adapter->MsiEnabled ? RssIds : 1;
2910 2911
2911 if (adapter->BasicAllocations == FALSE) { 2912 if (adapter->BasicAllocations == FALSE) {
2912 // No allocations have been made, including spinlocks, 2913 /* No allocations have been made, including spinlocks, */
2913 // or listhead initializations. Return. 2914 /* or listhead initializations. Return. */
2914 return; 2915 return;
2915 } 2916 }
2916 2917
@@ -2920,7 +2921,7 @@ void SxgFreeResources(p_adapter_t adapter)
2920 if (!(IsListEmpty(&adapter->AllSglBuffers))) { 2921 if (!(IsListEmpty(&adapter->AllSglBuffers))) {
2921 SxgFreeSglBuffers(adapter); 2922 SxgFreeSglBuffers(adapter);
2922 } 2923 }
2923 // Free event queues. 2924 /* Free event queues. */
2924 if (adapter->EventRings) { 2925 if (adapter->EventRings) {
2925 pci_free_consistent(adapter->pcidev, 2926 pci_free_consistent(adapter->pcidev,
2926 sizeof(SXG_EVENT_RING) * RssIds, 2927 sizeof(SXG_EVENT_RING) * RssIds,
@@ -2947,17 +2948,17 @@ void SxgFreeResources(p_adapter_t adapter)
2947 SXG_FREE_PACKET_POOL(adapter->PacketPoolHandle); 2948 SXG_FREE_PACKET_POOL(adapter->PacketPoolHandle);
2948 SXG_FREE_BUFFER_POOL(adapter->BufferPoolHandle); 2949 SXG_FREE_BUFFER_POOL(adapter->BufferPoolHandle);
2949 2950
2950 // Unmap register spaces 2951 /* Unmap register spaces */
2951 SxgUnmapResources(adapter); 2952 SxgUnmapResources(adapter);
2952 2953
2953 // Deregister DMA 2954 /* Deregister DMA */
2954 if (adapter->DmaHandle) { 2955 if (adapter->DmaHandle) {
2955 SXG_DEREGISTER_DMA(adapter->DmaHandle); 2956 SXG_DEREGISTER_DMA(adapter->DmaHandle);
2956 } 2957 }
2957 // Deregister interrupt 2958 /* Deregister interrupt */
2958 SxgDeregisterInterrupt(adapter); 2959 SxgDeregisterInterrupt(adapter);
2959 2960
2960 // Possibly free system info (5.2 only) 2961 /* Possibly free system info (5.2 only) */
2961 SXG_RELEASE_SYSTEM_INFO(adapter); 2962 SXG_RELEASE_SYSTEM_INFO(adapter);
2962 2963
2963 SxgDiagFreeResources(adapter); 2964 SxgDiagFreeResources(adapter);
@@ -3047,23 +3048,23 @@ static int sxg_allocate_buffer_memory(p_adapter_t adapter,
3047 3048
3048 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocMem", 3049 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocMem",
3049 adapter, Size, BufferType, 0); 3050 adapter, Size, BufferType, 0);
3050 // Grab the adapter lock and check the state. 3051 /* Grab the adapter lock and check the state. */
3051 // If we're in anything other than INITIALIZING or 3052 /* If we're in anything other than INITIALIZING or */
3052 // RUNNING state, fail. This is to prevent 3053 /* RUNNING state, fail. This is to prevent */
3053 // allocations in an improper driver state 3054 /* allocations in an improper driver state */
3054 spin_lock(&adapter->AdapterLock); 3055 spin_lock(&adapter->AdapterLock);
3055 3056
3056 // Increment the AllocationsPending count while holding 3057 /* Increment the AllocationsPending count while holding */
3057 // the lock. Pause processing relies on this 3058 /* the lock. Pause processing relies on this */
3058 ++adapter->AllocationsPending; 3059 ++adapter->AllocationsPending;
3059 spin_unlock(&adapter->AdapterLock); 3060 spin_unlock(&adapter->AdapterLock);
3060 3061
3061 // At initialization time allocate resources synchronously. 3062 /* At initialization time allocate resources synchronously. */
3062 Buffer = pci_alloc_consistent(adapter->pcidev, Size, &pBuffer); 3063 Buffer = pci_alloc_consistent(adapter->pcidev, Size, &pBuffer);
3063 if (Buffer == NULL) { 3064 if (Buffer == NULL) {
3064 spin_lock(&adapter->AdapterLock); 3065 spin_lock(&adapter->AdapterLock);
3065 // Decrement the AllocationsPending count while holding 3066 /* Decrement the AllocationsPending count while holding */
3066 // the lock. Pause processing relies on this 3067 /* the lock. Pause processing relies on this */
3067 --adapter->AllocationsPending; 3068 --adapter->AllocationsPending;
3068 spin_unlock(&adapter->AdapterLock); 3069 spin_unlock(&adapter->AdapterLock);
3069 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AlcMemF1", 3070 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AlcMemF1",
@@ -3113,10 +3114,10 @@ static void sxg_allocate_rcvblock_complete(p_adapter_t adapter,
3113 ASSERT((BufferSize == SXG_RCV_DATA_BUFFER_SIZE) || 3114 ASSERT((BufferSize == SXG_RCV_DATA_BUFFER_SIZE) ||
3114 (BufferSize == SXG_RCV_JUMBO_BUFFER_SIZE)); 3115 (BufferSize == SXG_RCV_JUMBO_BUFFER_SIZE));
3115 ASSERT(Length == SXG_RCV_BLOCK_SIZE(BufferSize)); 3116 ASSERT(Length == SXG_RCV_BLOCK_SIZE(BufferSize));
3116 // First, initialize the contained pool of receive data 3117 /* First, initialize the contained pool of receive data */
3117 // buffers. This initialization requires NBL/NB/MDL allocations, 3118 /* buffers. This initialization requires NBL/NB/MDL allocations, */
3118 // If any of them fail, free the block and return without 3119 /* If any of them fail, free the block and return without */
3119 // queueing the shared memory 3120 /* queueing the shared memory */
3120 RcvDataBuffer = RcvBlock; 3121 RcvDataBuffer = RcvBlock;
3121#if 0 3122#if 0
3122 for (i = 0, Paddr = *PhysicalAddress; 3123 for (i = 0, Paddr = *PhysicalAddress;
@@ -3126,14 +3127,14 @@ static void sxg_allocate_rcvblock_complete(p_adapter_t adapter,
3126 for (i = 0, Paddr = PhysicalAddress; 3127 for (i = 0, Paddr = PhysicalAddress;
3127 i < SXG_RCV_DESCRIPTORS_PER_BLOCK; 3128 i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
3128 i++, Paddr += BufferSize, RcvDataBuffer += BufferSize) { 3129 i++, Paddr += BufferSize, RcvDataBuffer += BufferSize) {
3129 // 3130 /* */
3130 RcvDataBufferHdr = 3131 RcvDataBufferHdr =
3131 (PSXG_RCV_DATA_BUFFER_HDR) (RcvDataBuffer + 3132 (PSXG_RCV_DATA_BUFFER_HDR) (RcvDataBuffer +
3132 SXG_RCV_DATA_BUFFER_HDR_OFFSET 3133 SXG_RCV_DATA_BUFFER_HDR_OFFSET
3133 (BufferSize)); 3134 (BufferSize));
3134 RcvDataBufferHdr->VirtualAddress = RcvDataBuffer; 3135 RcvDataBufferHdr->VirtualAddress = RcvDataBuffer;
3135 RcvDataBufferHdr->PhysicalAddress = Paddr; 3136 RcvDataBufferHdr->PhysicalAddress = Paddr;
3136 RcvDataBufferHdr->State = SXG_BUFFER_UPSTREAM; // For FREE macro assertion 3137 RcvDataBufferHdr->State = SXG_BUFFER_UPSTREAM; /* For FREE macro assertion */
3137 RcvDataBufferHdr->Size = 3138 RcvDataBufferHdr->Size =
3138 SXG_RCV_BUFFER_DATA_SIZE(BufferSize); 3139 SXG_RCV_BUFFER_DATA_SIZE(BufferSize);
3139 3140
@@ -3143,8 +3144,8 @@ static void sxg_allocate_rcvblock_complete(p_adapter_t adapter,
3143 3144
3144 } 3145 }
3145 3146
3146 // Place this entire block of memory on the AllRcvBlocks queue so it can be 3147 /* Place this entire block of memory on the AllRcvBlocks queue so it can be */
3147 // free later 3148 /* free later */
3148 RcvBlockHdr = 3149 RcvBlockHdr =
3149 (PSXG_RCV_BLOCK_HDR) ((unsigned char *)RcvBlock + 3150 (PSXG_RCV_BLOCK_HDR) ((unsigned char *)RcvBlock +
3150 SXG_RCV_BLOCK_HDR_OFFSET(BufferSize)); 3151 SXG_RCV_BLOCK_HDR_OFFSET(BufferSize));
@@ -3155,7 +3156,7 @@ static void sxg_allocate_rcvblock_complete(p_adapter_t adapter,
3155 InsertTailList(&adapter->AllRcvBlocks, &RcvBlockHdr->AllList); 3156 InsertTailList(&adapter->AllRcvBlocks, &RcvBlockHdr->AllList);
3156 spin_unlock(&adapter->RcvQLock); 3157 spin_unlock(&adapter->RcvQLock);
3157 3158
3158 // Now free the contained receive data buffers that we initialized above 3159 /* Now free the contained receive data buffers that we initialized above */
3159 RcvDataBuffer = RcvBlock; 3160 RcvDataBuffer = RcvBlock;
3160 for (i = 0, Paddr = PhysicalAddress; 3161 for (i = 0, Paddr = PhysicalAddress;
3161 i < SXG_RCV_DESCRIPTORS_PER_BLOCK; 3162 i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
@@ -3168,7 +3169,7 @@ static void sxg_allocate_rcvblock_complete(p_adapter_t adapter,
3168 spin_unlock(&adapter->RcvQLock); 3169 spin_unlock(&adapter->RcvQLock);
3169 } 3170 }
3170 3171
3171 // Locate the descriptor block and put it on a separate free queue 3172 /* Locate the descriptor block and put it on a separate free queue */
3172 RcvDescriptorBlock = 3173 RcvDescriptorBlock =
3173 (PSXG_RCV_DESCRIPTOR_BLOCK) ((unsigned char *)RcvBlock + 3174 (PSXG_RCV_DESCRIPTOR_BLOCK) ((unsigned char *)RcvBlock +
3174 SXG_RCV_DESCRIPTOR_BLOCK_OFFSET 3175 SXG_RCV_DESCRIPTOR_BLOCK_OFFSET
@@ -3186,7 +3187,7 @@ static void sxg_allocate_rcvblock_complete(p_adapter_t adapter,
3186 adapter, RcvBlock, Length, 0); 3187 adapter, RcvBlock, Length, 0);
3187 return; 3188 return;
3188 fail: 3189 fail:
3189 // Free any allocated resources 3190 /* Free any allocated resources */
3190 if (RcvBlock) { 3191 if (RcvBlock) {
3191 RcvDataBuffer = RcvBlock; 3192 RcvDataBuffer = RcvBlock;
3192 for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK; 3193 for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
@@ -3200,7 +3201,7 @@ static void sxg_allocate_rcvblock_complete(p_adapter_t adapter,
3200 pci_free_consistent(adapter->pcidev, 3201 pci_free_consistent(adapter->pcidev,
3201 Length, RcvBlock, PhysicalAddress); 3202 Length, RcvBlock, PhysicalAddress);
3202 } 3203 }
3203 DBG_ERROR("%s: OUT OF RESOURCES\n", __FUNCTION__); 3204 DBG_ERROR("%s: OUT OF RESOURCES\n", __func__);
3204 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "RcvAFail", 3205 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "RcvAFail",
3205 adapter, adapter->FreeRcvBufferCount, 3206 adapter, adapter->FreeRcvBufferCount,
3206 adapter->FreeRcvBlockCount, adapter->AllRcvBlockCount); 3207 adapter->FreeRcvBlockCount, adapter->AllRcvBlockCount);
@@ -3230,7 +3231,7 @@ static void sxg_allocate_sgl_buffer_complete(p_adapter_t adapter,
3230 adapter->AllSglBufferCount++; 3231 adapter->AllSglBufferCount++;
3231 memset(SxgSgl, 0, sizeof(SXG_SCATTER_GATHER)); 3232 memset(SxgSgl, 0, sizeof(SXG_SCATTER_GATHER));
3232 SxgSgl->PhysicalAddress = PhysicalAddress; /* *PhysicalAddress; */ 3233 SxgSgl->PhysicalAddress = PhysicalAddress; /* *PhysicalAddress; */
3233 SxgSgl->adapter = adapter; // Initialize backpointer once 3234 SxgSgl->adapter = adapter; /* Initialize backpointer once */
3234 InsertTailList(&adapter->AllSglBuffers, &SxgSgl->AllList); 3235 InsertTailList(&adapter->AllSglBuffers, &SxgSgl->AllList);
3235 spin_unlock(&adapter->SglQLock); 3236 spin_unlock(&adapter->SglQLock);
3236 SxgSgl->State = SXG_BUFFER_BUSY; 3237 SxgSgl->State = SXG_BUFFER_BUSY;
@@ -3244,14 +3245,14 @@ static unsigned char temp_mac_address[6] =
3244 3245
3245static void sxg_adapter_set_hwaddr(p_adapter_t adapter) 3246static void sxg_adapter_set_hwaddr(p_adapter_t adapter)
3246{ 3247{
3247// DBG_ERROR ("%s ENTER card->config_set[%x] port[%d] physport[%d] funct#[%d]\n", __FUNCTION__, 3248/* DBG_ERROR ("%s ENTER card->config_set[%x] port[%d] physport[%d] funct#[%d]\n", __func__, */
3248// card->config_set, adapter->port, adapter->physport, adapter->functionnumber); 3249/* card->config_set, adapter->port, adapter->physport, adapter->functionnumber); */
3249// 3250/* */
3250// sxg_dbg_macaddrs(adapter); 3251/* sxg_dbg_macaddrs(adapter); */
3251 3252
3252 memcpy(adapter->macaddr, temp_mac_address, sizeof(SXG_CONFIG_MAC)); 3253 memcpy(adapter->macaddr, temp_mac_address, sizeof(SXG_CONFIG_MAC));
3253// DBG_ERROR ("%s AFTER copying from config.macinfo into currmacaddr\n", __FUNCTION__); 3254/* DBG_ERROR ("%s AFTER copying from config.macinfo into currmacaddr\n", __func__); */
3254// sxg_dbg_macaddrs(adapter); 3255/* sxg_dbg_macaddrs(adapter); */
3255 if (!(adapter->currmacaddr[0] || 3256 if (!(adapter->currmacaddr[0] ||
3256 adapter->currmacaddr[1] || 3257 adapter->currmacaddr[1] ||
3257 adapter->currmacaddr[2] || 3258 adapter->currmacaddr[2] ||
@@ -3262,18 +3263,18 @@ static void sxg_adapter_set_hwaddr(p_adapter_t adapter)
3262 if (adapter->netdev) { 3263 if (adapter->netdev) {
3263 memcpy(adapter->netdev->dev_addr, adapter->currmacaddr, 6); 3264 memcpy(adapter->netdev->dev_addr, adapter->currmacaddr, 6);
3264 } 3265 }
3265// DBG_ERROR ("%s EXIT port %d\n", __FUNCTION__, adapter->port); 3266/* DBG_ERROR ("%s EXIT port %d\n", __func__, adapter->port); */
3266 sxg_dbg_macaddrs(adapter); 3267 sxg_dbg_macaddrs(adapter);
3267 3268
3268} 3269}
3269 3270
3271#if XXXTODO
3270static int sxg_mac_set_address(p_net_device dev, void *ptr) 3272static int sxg_mac_set_address(p_net_device dev, void *ptr)
3271{ 3273{
3272#if XXXTODO
3273 p_adapter_t adapter = (p_adapter_t) netdev_priv(dev); 3274 p_adapter_t adapter = (p_adapter_t) netdev_priv(dev);
3274 struct sockaddr *addr = ptr; 3275 struct sockaddr *addr = ptr;
3275 3276
3276 DBG_ERROR("%s ENTER (%s)\n", __FUNCTION__, adapter->netdev->name); 3277 DBG_ERROR("%s ENTER (%s)\n", __func__, adapter->netdev->name);
3277 3278
3278 if (netif_running(dev)) { 3279 if (netif_running(dev)) {
3279 return -EBUSY; 3280 return -EBUSY;
@@ -3282,22 +3283,22 @@ static int sxg_mac_set_address(p_net_device dev, void *ptr)
3282 return -EBUSY; 3283 return -EBUSY;
3283 } 3284 }
3284 DBG_ERROR("sxg: %s (%s) curr %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n", 3285 DBG_ERROR("sxg: %s (%s) curr %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
3285 __FUNCTION__, adapter->netdev->name, adapter->currmacaddr[0], 3286 __func__, adapter->netdev->name, adapter->currmacaddr[0],
3286 adapter->currmacaddr[1], adapter->currmacaddr[2], 3287 adapter->currmacaddr[1], adapter->currmacaddr[2],
3287 adapter->currmacaddr[3], adapter->currmacaddr[4], 3288 adapter->currmacaddr[3], adapter->currmacaddr[4],
3288 adapter->currmacaddr[5]); 3289 adapter->currmacaddr[5]);
3289 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 3290 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3290 memcpy(adapter->currmacaddr, addr->sa_data, dev->addr_len); 3291 memcpy(adapter->currmacaddr, addr->sa_data, dev->addr_len);
3291 DBG_ERROR("sxg: %s (%s) new %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n", 3292 DBG_ERROR("sxg: %s (%s) new %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
3292 __FUNCTION__, adapter->netdev->name, adapter->currmacaddr[0], 3293 __func__, adapter->netdev->name, adapter->currmacaddr[0],
3293 adapter->currmacaddr[1], adapter->currmacaddr[2], 3294 adapter->currmacaddr[1], adapter->currmacaddr[2],
3294 adapter->currmacaddr[3], adapter->currmacaddr[4], 3295 adapter->currmacaddr[3], adapter->currmacaddr[4],
3295 adapter->currmacaddr[5]); 3296 adapter->currmacaddr[5]);
3296 3297
3297 sxg_config_set(adapter, TRUE); 3298 sxg_config_set(adapter, TRUE);
3298#endif
3299 return 0; 3299 return 0;
3300} 3300}
3301#endif
3301 3302
3302/*****************************************************************************/ 3303/*****************************************************************************/
3303/************* SXG DRIVER FUNCTIONS (below) ********************************/ 3304/************* SXG DRIVER FUNCTIONS (below) ********************************/
@@ -3321,77 +3322,77 @@ static int sxg_initialize_adapter(p_adapter_t adapter)
3321 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "InitAdpt", 3322 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "InitAdpt",
3322 adapter, 0, 0, 0); 3323 adapter, 0, 0, 0);
3323 3324
3324 RssIds = 1; // XXXTODO SXG_RSS_CPU_COUNT(adapter); 3325 RssIds = 1; /* XXXTODO SXG_RSS_CPU_COUNT(adapter); */
3325 IsrCount = adapter->MsiEnabled ? RssIds : 1; 3326 IsrCount = adapter->MsiEnabled ? RssIds : 1;
3326 3327
3327 // Sanity check SXG_UCODE_REGS structure definition to 3328 /* Sanity check SXG_UCODE_REGS structure definition to */
3328 // make sure the length is correct 3329 /* make sure the length is correct */
3329 ASSERT(sizeof(SXG_UCODE_REGS) == SXG_REGISTER_SIZE_PER_CPU); 3330 ASSERT(sizeof(SXG_UCODE_REGS) == SXG_REGISTER_SIZE_PER_CPU);
3330 3331
3331 // Disable interrupts 3332 /* Disable interrupts */
3332 SXG_DISABLE_ALL_INTERRUPTS(adapter); 3333 SXG_DISABLE_ALL_INTERRUPTS(adapter);
3333 3334
3334 // Set MTU 3335 /* Set MTU */
3335 ASSERT((adapter->FrameSize == ETHERMAXFRAME) || 3336 ASSERT((adapter->FrameSize == ETHERMAXFRAME) ||
3336 (adapter->FrameSize == JUMBOMAXFRAME)); 3337 (adapter->FrameSize == JUMBOMAXFRAME));
3337 WRITE_REG(adapter->UcodeRegs[0].LinkMtu, adapter->FrameSize, TRUE); 3338 WRITE_REG(adapter->UcodeRegs[0].LinkMtu, adapter->FrameSize, TRUE);
3338 3339
3339 // Set event ring base address and size 3340 /* Set event ring base address and size */
3340 WRITE_REG64(adapter, 3341 WRITE_REG64(adapter,
3341 adapter->UcodeRegs[0].EventBase, adapter->PEventRings, 0); 3342 adapter->UcodeRegs[0].EventBase, adapter->PEventRings, 0);
3342 WRITE_REG(adapter->UcodeRegs[0].EventSize, EVENT_RING_SIZE, TRUE); 3343 WRITE_REG(adapter->UcodeRegs[0].EventSize, EVENT_RING_SIZE, TRUE);
3343 3344
3344 // Per-ISR initialization 3345 /* Per-ISR initialization */
3345 for (i = 0; i < IsrCount; i++) { 3346 for (i = 0; i < IsrCount; i++) {
3346 u64 Addr; 3347 u64 Addr;
3347 // Set interrupt status pointer 3348 /* Set interrupt status pointer */
3348 Addr = adapter->PIsr + (i * sizeof(u32)); 3349 Addr = adapter->PIsr + (i * sizeof(u32));
3349 WRITE_REG64(adapter, adapter->UcodeRegs[i].Isp, Addr, i); 3350 WRITE_REG64(adapter, adapter->UcodeRegs[i].Isp, Addr, i);
3350 } 3351 }
3351 3352
3352 // XMT ring zero index 3353 /* XMT ring zero index */
3353 WRITE_REG64(adapter, 3354 WRITE_REG64(adapter,
3354 adapter->UcodeRegs[0].SPSendIndex, 3355 adapter->UcodeRegs[0].SPSendIndex,
3355 adapter->PXmtRingZeroIndex, 0); 3356 adapter->PXmtRingZeroIndex, 0);
3356 3357
3357 // Per-RSS initialization 3358 /* Per-RSS initialization */
3358 for (i = 0; i < RssIds; i++) { 3359 for (i = 0; i < RssIds; i++) {
3359 // Release all event ring entries to the Microcode 3360 /* Release all event ring entries to the Microcode */
3360 WRITE_REG(adapter->UcodeRegs[i].EventRelease, EVENT_RING_SIZE, 3361 WRITE_REG(adapter->UcodeRegs[i].EventRelease, EVENT_RING_SIZE,
3361 TRUE); 3362 TRUE);
3362 } 3363 }
3363 3364
3364 // Transmit ring base and size 3365 /* Transmit ring base and size */
3365 WRITE_REG64(adapter, 3366 WRITE_REG64(adapter,
3366 adapter->UcodeRegs[0].XmtBase, adapter->PXmtRings, 0); 3367 adapter->UcodeRegs[0].XmtBase, adapter->PXmtRings, 0);
3367 WRITE_REG(adapter->UcodeRegs[0].XmtSize, SXG_XMT_RING_SIZE, TRUE); 3368 WRITE_REG(adapter->UcodeRegs[0].XmtSize, SXG_XMT_RING_SIZE, TRUE);
3368 3369
3369 // Receive ring base and size 3370 /* Receive ring base and size */
3370 WRITE_REG64(adapter, 3371 WRITE_REG64(adapter,
3371 adapter->UcodeRegs[0].RcvBase, adapter->PRcvRings, 0); 3372 adapter->UcodeRegs[0].RcvBase, adapter->PRcvRings, 0);
3372 WRITE_REG(adapter->UcodeRegs[0].RcvSize, SXG_RCV_RING_SIZE, TRUE); 3373 WRITE_REG(adapter->UcodeRegs[0].RcvSize, SXG_RCV_RING_SIZE, TRUE);
3373 3374
3374 // Populate the card with receive buffers 3375 /* Populate the card with receive buffers */
3375 sxg_stock_rcv_buffers(adapter); 3376 sxg_stock_rcv_buffers(adapter);
3376 3377
3377 // Initialize checksum offload capabilities. At the moment 3378 /* Initialize checksum offload capabilities. At the moment */
3378 // we always enable IP and TCP receive checksums on the card. 3379 /* we always enable IP and TCP receive checksums on the card. */
3379 // Depending on the checksum configuration specified by the 3380 /* Depending on the checksum configuration specified by the */
3380 // user, we can choose to report or ignore the checksum 3381 /* user, we can choose to report or ignore the checksum */
3381 // information provided by the card. 3382 /* information provided by the card. */
3382 WRITE_REG(adapter->UcodeRegs[0].ReceiveChecksum, 3383 WRITE_REG(adapter->UcodeRegs[0].ReceiveChecksum,
3383 SXG_RCV_TCP_CSUM_ENABLED | SXG_RCV_IP_CSUM_ENABLED, TRUE); 3384 SXG_RCV_TCP_CSUM_ENABLED | SXG_RCV_IP_CSUM_ENABLED, TRUE);
3384 3385
3385 // Initialize the MAC, XAUI 3386 /* Initialize the MAC, XAUI */
3386 DBG_ERROR("sxg: %s ENTER sxg_initialize_link\n", __FUNCTION__); 3387 DBG_ERROR("sxg: %s ENTER sxg_initialize_link\n", __func__);
3387 status = sxg_initialize_link(adapter); 3388 status = sxg_initialize_link(adapter);
3388 DBG_ERROR("sxg: %s EXIT sxg_initialize_link status[%x]\n", __FUNCTION__, 3389 DBG_ERROR("sxg: %s EXIT sxg_initialize_link status[%x]\n", __func__,
3389 status); 3390 status);
3390 if (status != STATUS_SUCCESS) { 3391 if (status != STATUS_SUCCESS) {
3391 return (status); 3392 return (status);
3392 } 3393 }
3393 // Initialize Dead to FALSE. 3394 /* Initialize Dead to FALSE. */
3394 // SlicCheckForHang or SlicDumpThread will take it from here. 3395 /* SlicCheckForHang or SlicDumpThread will take it from here. */
3395 adapter->Dead = FALSE; 3396 adapter->Dead = FALSE;
3396 adapter->PingOutstanding = FALSE; 3397 adapter->PingOutstanding = FALSE;
3397 3398
@@ -3428,14 +3429,14 @@ static int sxg_fill_descriptor_block(p_adapter_t adapter,
3428 3429
3429 ASSERT(RcvDescriptorBlockHdr); 3430 ASSERT(RcvDescriptorBlockHdr);
3430 3431
3431 // If we don't have the resources to fill the descriptor block, 3432 /* If we don't have the resources to fill the descriptor block, */
3432 // return failure 3433 /* return failure */
3433 if ((adapter->FreeRcvBufferCount < SXG_RCV_DESCRIPTORS_PER_BLOCK) || 3434 if ((adapter->FreeRcvBufferCount < SXG_RCV_DESCRIPTORS_PER_BLOCK) ||
3434 SXG_RING_FULL(RcvRingInfo)) { 3435 SXG_RING_FULL(RcvRingInfo)) {
3435 adapter->Stats.NoMem++; 3436 adapter->Stats.NoMem++;
3436 return (STATUS_FAILURE); 3437 return (STATUS_FAILURE);
3437 } 3438 }
3438 // Get a ring descriptor command 3439 /* Get a ring descriptor command */
3439 SXG_GET_CMD(RingZero, 3440 SXG_GET_CMD(RingZero,
3440 RcvRingInfo, RingDescriptorCmd, RcvDescriptorBlockHdr); 3441 RcvRingInfo, RingDescriptorCmd, RcvDescriptorBlockHdr);
3441 ASSERT(RingDescriptorCmd); 3442 ASSERT(RingDescriptorCmd);
@@ -3443,7 +3444,7 @@ static int sxg_fill_descriptor_block(p_adapter_t adapter,
3443 RcvDescriptorBlock = 3444 RcvDescriptorBlock =
3444 (PSXG_RCV_DESCRIPTOR_BLOCK) RcvDescriptorBlockHdr->VirtualAddress; 3445 (PSXG_RCV_DESCRIPTOR_BLOCK) RcvDescriptorBlockHdr->VirtualAddress;
3445 3446
3446 // Fill in the descriptor block 3447 /* Fill in the descriptor block */
3447 for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK; i++) { 3448 for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK; i++) {
3448 SXG_GET_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr); 3449 SXG_GET_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
3449 ASSERT(RcvDataBufferHdr); 3450 ASSERT(RcvDataBufferHdr);
@@ -3454,13 +3455,13 @@ static int sxg_fill_descriptor_block(p_adapter_t adapter,
3454 RcvDescriptorBlock->Descriptors[i].PhysicalAddress = 3455 RcvDescriptorBlock->Descriptors[i].PhysicalAddress =
3455 RcvDataBufferHdr->PhysicalAddress; 3456 RcvDataBufferHdr->PhysicalAddress;
3456 } 3457 }
3457 // Add the descriptor block to receive descriptor ring 0 3458 /* Add the descriptor block to receive descriptor ring 0 */
3458 RingDescriptorCmd->Sgl = RcvDescriptorBlockHdr->PhysicalAddress; 3459 RingDescriptorCmd->Sgl = RcvDescriptorBlockHdr->PhysicalAddress;
3459 3460
3460 // RcvBuffersOnCard is not protected via the receive lock (see 3461 /* RcvBuffersOnCard is not protected via the receive lock (see */
3461 // sxg_process_event_queue) We don't want to grap a lock every time a 3462 /* sxg_process_event_queue) We don't want to grap a lock every time a */
3462 // buffer is returned to us, so we use atomic interlocked functions 3463 /* buffer is returned to us, so we use atomic interlocked functions */
3463 // instead. 3464 /* instead. */
3464 adapter->RcvBuffersOnCard += SXG_RCV_DESCRIPTORS_PER_BLOCK; 3465 adapter->RcvBuffersOnCard += SXG_RCV_DESCRIPTORS_PER_BLOCK;
3465 3466
3466 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DscBlk", 3467 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DscBlk",
@@ -3490,10 +3491,10 @@ static void sxg_stock_rcv_buffers(p_adapter_t adapter)
3490 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "StockBuf", 3491 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "StockBuf",
3491 adapter, adapter->RcvBuffersOnCard, 3492 adapter, adapter->RcvBuffersOnCard,
3492 adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount); 3493 adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount);
3493 // First, see if we've got less than our minimum threshold of 3494 /* First, see if we've got less than our minimum threshold of */
3494 // receive buffers, there isn't an allocation in progress, and 3495 /* receive buffers, there isn't an allocation in progress, and */
3495 // we haven't exceeded our maximum.. get another block of buffers 3496 /* we haven't exceeded our maximum.. get another block of buffers */
3496 // None of this needs to be SMP safe. It's round numbers. 3497 /* None of this needs to be SMP safe. It's round numbers. */
3497 if ((adapter->FreeRcvBufferCount < SXG_MIN_RCV_DATA_BUFFERS) && 3498 if ((adapter->FreeRcvBufferCount < SXG_MIN_RCV_DATA_BUFFERS) &&
3498 (adapter->AllRcvBlockCount < SXG_MAX_RCV_BLOCKS) && 3499 (adapter->AllRcvBlockCount < SXG_MAX_RCV_BLOCKS) &&
3499 (adapter->AllocationsPending == 0)) { 3500 (adapter->AllocationsPending == 0)) {
@@ -3502,12 +3503,12 @@ static void sxg_stock_rcv_buffers(p_adapter_t adapter)
3502 ReceiveBufferSize), 3503 ReceiveBufferSize),
3503 SXG_BUFFER_TYPE_RCV); 3504 SXG_BUFFER_TYPE_RCV);
3504 } 3505 }
3505 // Now grab the RcvQLock lock and proceed 3506 /* Now grab the RcvQLock lock and proceed */
3506 spin_lock(&adapter->RcvQLock); 3507 spin_lock(&adapter->RcvQLock);
3507 while (adapter->RcvBuffersOnCard < SXG_RCV_DATA_BUFFERS) { 3508 while (adapter->RcvBuffersOnCard < SXG_RCV_DATA_BUFFERS) {
3508 PLIST_ENTRY _ple; 3509 PLIST_ENTRY _ple;
3509 3510
3510 // Get a descriptor block 3511 /* Get a descriptor block */
3511 RcvDescriptorBlockHdr = NULL; 3512 RcvDescriptorBlockHdr = NULL;
3512 if (adapter->FreeRcvBlockCount) { 3513 if (adapter->FreeRcvBlockCount) {
3513 _ple = RemoveHeadList(&adapter->FreeRcvBlocks); 3514 _ple = RemoveHeadList(&adapter->FreeRcvBlocks);
@@ -3519,14 +3520,14 @@ static void sxg_stock_rcv_buffers(p_adapter_t adapter)
3519 } 3520 }
3520 3521
3521 if (RcvDescriptorBlockHdr == NULL) { 3522 if (RcvDescriptorBlockHdr == NULL) {
3522 // Bail out.. 3523 /* Bail out.. */
3523 adapter->Stats.NoMem++; 3524 adapter->Stats.NoMem++;
3524 break; 3525 break;
3525 } 3526 }
3526 // Fill in the descriptor block and give it to the card 3527 /* Fill in the descriptor block and give it to the card */
3527 if (sxg_fill_descriptor_block(adapter, RcvDescriptorBlockHdr) == 3528 if (sxg_fill_descriptor_block(adapter, RcvDescriptorBlockHdr) ==
3528 STATUS_FAILURE) { 3529 STATUS_FAILURE) {
3529 // Free the descriptor block 3530 /* Free the descriptor block */
3530 SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter, 3531 SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter,
3531 RcvDescriptorBlockHdr); 3532 RcvDescriptorBlockHdr);
3532 break; 3533 break;
@@ -3560,15 +3561,15 @@ static void sxg_complete_descriptor_blocks(p_adapter_t adapter,
3560 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpRBlks", 3561 SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpRBlks",
3561 adapter, Index, RcvRingInfo->Head, RcvRingInfo->Tail); 3562 adapter, Index, RcvRingInfo->Head, RcvRingInfo->Tail);
3562 3563
3563 // Now grab the RcvQLock lock and proceed 3564 /* Now grab the RcvQLock lock and proceed */
3564 spin_lock(&adapter->RcvQLock); 3565 spin_lock(&adapter->RcvQLock);
3565 ASSERT(Index != RcvRingInfo->Tail); 3566 ASSERT(Index != RcvRingInfo->Tail);
3566 while (RcvRingInfo->Tail != Index) { 3567 while (RcvRingInfo->Tail != Index) {
3567 // 3568 /* */
3568 // Locate the current Cmd (ring descriptor entry), and 3569 /* Locate the current Cmd (ring descriptor entry), and */
3569 // associated receive descriptor block, and advance 3570 /* associated receive descriptor block, and advance */
3570 // the tail 3571 /* the tail */
3571 // 3572 /* */
3572 SXG_RETURN_CMD(RingZero, 3573 SXG_RETURN_CMD(RingZero,
3573 RcvRingInfo, 3574 RcvRingInfo,
3574 RingDescriptorCmd, RcvDescriptorBlockHdr); 3575 RingDescriptorCmd, RcvDescriptorBlockHdr);
@@ -3576,12 +3577,12 @@ static void sxg_complete_descriptor_blocks(p_adapter_t adapter,
3576 RcvRingInfo->Head, RcvRingInfo->Tail, 3577 RcvRingInfo->Head, RcvRingInfo->Tail,
3577 RingDescriptorCmd, RcvDescriptorBlockHdr); 3578 RingDescriptorCmd, RcvDescriptorBlockHdr);
3578 3579
3579 // Clear the SGL field 3580 /* Clear the SGL field */
3580 RingDescriptorCmd->Sgl = 0; 3581 RingDescriptorCmd->Sgl = 0;
3581 // Attempt to refill it and hand it right back to the 3582 /* Attempt to refill it and hand it right back to the */
3582 // card. If we fail to refill it, free the descriptor block 3583 /* card. If we fail to refill it, free the descriptor block */
3583 // header. The card will be restocked later via the 3584 /* header. The card will be restocked later via the */
3584 // RcvBuffersOnCard test 3585 /* RcvBuffersOnCard test */
3585 if (sxg_fill_descriptor_block(adapter, RcvDescriptorBlockHdr) == 3586 if (sxg_fill_descriptor_block(adapter, RcvDescriptorBlockHdr) ==
3586 STATUS_FAILURE) { 3587 STATUS_FAILURE) {
3587 SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter, 3588 SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter,
diff --git a/drivers/staging/sxg/sxg_os.h b/drivers/staging/sxg/sxg_os.h
index 26fb0ffafa5c..01182689aaba 100644
--- a/drivers/staging/sxg/sxg_os.h
+++ b/drivers/staging/sxg/sxg_os.h
@@ -44,7 +44,6 @@
44#define FALSE (0) 44#define FALSE (0)
45#define TRUE (1) 45#define TRUE (1)
46 46
47
48typedef struct _LIST_ENTRY { 47typedef struct _LIST_ENTRY {
49 struct _LIST_ENTRY *nle_flink; 48 struct _LIST_ENTRY *nle_flink;
50 struct _LIST_ENTRY *nle_blink; 49 struct _LIST_ENTRY *nle_blink;
@@ -69,35 +68,32 @@ typedef struct _LIST_ENTRY {
69 68
70/* These two have to be inlined since they return things. */ 69/* These two have to be inlined since they return things. */
71 70
72static __inline PLIST_ENTRY 71static __inline PLIST_ENTRY RemoveHeadList(list_entry * l)
73RemoveHeadList(list_entry *l)
74{ 72{
75 list_entry *f; 73 list_entry *f;
76 list_entry *e; 74 list_entry *e;
77 75
78 e = l->nle_flink; 76 e = l->nle_flink;
79 f = e->nle_flink; 77 f = e->nle_flink;
80 l->nle_flink = f; 78 l->nle_flink = f;
81 f->nle_blink = l; 79 f->nle_blink = l;
82 80
83 return (e); 81 return (e);
84} 82}
85 83
86static __inline PLIST_ENTRY 84static __inline PLIST_ENTRY RemoveTailList(list_entry * l)
87RemoveTailList(list_entry *l)
88{ 85{
89 list_entry *b; 86 list_entry *b;
90 list_entry *e; 87 list_entry *e;
91 88
92 e = l->nle_blink; 89 e = l->nle_blink;
93 b = e->nle_blink; 90 b = e->nle_blink;
94 l->nle_blink = b; 91 l->nle_blink = b;
95 b->nle_flink = l; 92 b->nle_flink = l;
96 93
97 return (e); 94 return (e);
98} 95}
99 96
100
101#define InsertTailList(l, e) \ 97#define InsertTailList(l, e) \
102 do { \ 98 do { \
103 list_entry *b; \ 99 list_entry *b; \
@@ -120,7 +116,6 @@ RemoveTailList(list_entry *l)
120 (l)->nle_flink = (e); \ 116 (l)->nle_flink = (e); \
121 } while (0) 117 } while (0)
122 118
123
124#define ATK_DEBUG 1 119#define ATK_DEBUG 1
125 120
126#if ATK_DEBUG 121#if ATK_DEBUG
@@ -133,7 +128,6 @@ RemoveTailList(list_entry *l)
133#define SLIC_TIMESTAMP(value) 128#define SLIC_TIMESTAMP(value)
134#endif 129#endif
135 130
136
137/****************** SXG DEFINES *****************************************/ 131/****************** SXG DEFINES *****************************************/
138 132
139#ifdef ATKDBG 133#ifdef ATKDBG
@@ -150,5 +144,4 @@ RemoveTailList(list_entry *l)
150#define WRITE_REG64(a,reg,value,cpu) sxg_reg64_write((a),(&reg),(value),(cpu)) 144#define WRITE_REG64(a,reg,value,cpu) sxg_reg64_write((a),(&reg),(value),(cpu))
151#define READ_REG(reg,value) (value) = readl((void __iomem *)(&reg)) 145#define READ_REG(reg,value) (value) = readl((void __iomem *)(&reg))
152 146
153#endif /* _SLIC_OS_SPECIFIC_H_ */ 147#endif /* _SLIC_OS_SPECIFIC_H_ */
154
diff --git a/drivers/staging/sxg/sxgdbg.h b/drivers/staging/sxg/sxgdbg.h
index cfb6c7c77a9e..4522b8d71495 100644
--- a/drivers/staging/sxg/sxgdbg.h
+++ b/drivers/staging/sxg/sxgdbg.h
@@ -58,7 +58,7 @@
58 { \ 58 { \
59 if (!(a)) { \ 59 if (!(a)) { \
60 DBG_ERROR("ASSERT() Failure: file %s, function %s line %d\n",\ 60 DBG_ERROR("ASSERT() Failure: file %s, function %s line %d\n",\
61 __FILE__, __FUNCTION__, __LINE__); \ 61 __FILE__, __func__, __LINE__); \
62 } \ 62 } \
63 } 63 }
64#endif 64#endif
diff --git a/drivers/staging/sxg/sxghif.h b/drivers/staging/sxg/sxghif.h
index ed26ceaa1315..88bffbaa3be8 100644
--- a/drivers/staging/sxg/sxghif.h
+++ b/drivers/staging/sxg/sxghif.h
@@ -14,119 +14,119 @@
14 *******************************************************************************/ 14 *******************************************************************************/
15typedef struct _SXG_UCODE_REGS { 15typedef struct _SXG_UCODE_REGS {
16 // Address 0 - 0x3F = Command codes 0-15 for TCB 0. Excode 0 16 // Address 0 - 0x3F = Command codes 0-15 for TCB 0. Excode 0
17 u32 Icr; // Code = 0 (extended), ExCode = 0 - Int control 17 u32 Icr; // Code = 0 (extended), ExCode = 0 - Int control
18 u32 RsvdReg1; // Code = 1 - TOE -NA 18 u32 RsvdReg1; // Code = 1 - TOE -NA
19 u32 RsvdReg2; // Code = 2 - TOE -NA 19 u32 RsvdReg2; // Code = 2 - TOE -NA
20 u32 RsvdReg3; // Code = 3 - TOE -NA 20 u32 RsvdReg3; // Code = 3 - TOE -NA
21 u32 RsvdReg4; // Code = 4 - TOE -NA 21 u32 RsvdReg4; // Code = 4 - TOE -NA
22 u32 RsvdReg5; // Code = 5 - TOE -NA 22 u32 RsvdReg5; // Code = 5 - TOE -NA
23 u32 CardUp; // Code = 6 - Microcode initialized when 1 23 u32 CardUp; // Code = 6 - Microcode initialized when 1
24 u32 RsvdReg7; // Code = 7 - TOE -NA 24 u32 RsvdReg7; // Code = 7 - TOE -NA
25 u32 CodeNotUsed[8]; // Codes 8-15 not used. ExCode = 0 25 u32 CodeNotUsed[8]; // Codes 8-15 not used. ExCode = 0
26 // This brings us to ExCode 1 at address 0x40 = Interrupt status pointer 26 // This brings us to ExCode 1 at address 0x40 = Interrupt status pointer
27 u32 Isp; // Code = 0 (extended), ExCode = 1 27 u32 Isp; // Code = 0 (extended), ExCode = 1
28 u32 PadEx1[15]; // Codes 1-15 not used with extended codes 28 u32 PadEx1[15]; // Codes 1-15 not used with extended codes
29 // ExCode 2 = Interrupt Status Register 29 // ExCode 2 = Interrupt Status Register
30 u32 Isr; // Code = 0 (extended), ExCode = 2 30 u32 Isr; // Code = 0 (extended), ExCode = 2
31 u32 PadEx2[15]; 31 u32 PadEx2[15];
32 // ExCode 3 = Event base register. Location of event rings 32 // ExCode 3 = Event base register. Location of event rings
33 u32 EventBase; // Code = 0 (extended), ExCode = 3 33 u32 EventBase; // Code = 0 (extended), ExCode = 3
34 u32 PadEx3[15]; 34 u32 PadEx3[15];
35 // ExCode 4 = Event ring size 35 // ExCode 4 = Event ring size
36 u32 EventSize; // Code = 0 (extended), ExCode = 4 36 u32 EventSize; // Code = 0 (extended), ExCode = 4
37 u32 PadEx4[15]; 37 u32 PadEx4[15];
38 // ExCode 5 = TCB Buffers base address 38 // ExCode 5 = TCB Buffers base address
39 u32 TcbBase; // Code = 0 (extended), ExCode = 5 39 u32 TcbBase; // Code = 0 (extended), ExCode = 5
40 u32 PadEx5[15]; 40 u32 PadEx5[15];
41 // ExCode 6 = TCB Composite Buffers base address 41 // ExCode 6 = TCB Composite Buffers base address
42 u32 TcbCompBase; // Code = 0 (extended), ExCode = 6 42 u32 TcbCompBase; // Code = 0 (extended), ExCode = 6
43 u32 PadEx6[15]; 43 u32 PadEx6[15];
44 // ExCode 7 = Transmit ring base address 44 // ExCode 7 = Transmit ring base address
45 u32 XmtBase; // Code = 0 (extended), ExCode = 7 45 u32 XmtBase; // Code = 0 (extended), ExCode = 7
46 u32 PadEx7[15]; 46 u32 PadEx7[15];
47 // ExCode 8 = Transmit ring size 47 // ExCode 8 = Transmit ring size
48 u32 XmtSize; // Code = 0 (extended), ExCode = 8 48 u32 XmtSize; // Code = 0 (extended), ExCode = 8
49 u32 PadEx8[15]; 49 u32 PadEx8[15];
50 // ExCode 9 = Receive ring base address 50 // ExCode 9 = Receive ring base address
51 u32 RcvBase; // Code = 0 (extended), ExCode = 9 51 u32 RcvBase; // Code = 0 (extended), ExCode = 9
52 u32 PadEx9[15]; 52 u32 PadEx9[15];
53 // ExCode 10 = Receive ring size 53 // ExCode 10 = Receive ring size
54 u32 RcvSize; // Code = 0 (extended), ExCode = 10 54 u32 RcvSize; // Code = 0 (extended), ExCode = 10
55 u32 PadEx10[15]; 55 u32 PadEx10[15];
56 // ExCode 11 = Read EEPROM Config 56 // ExCode 11 = Read EEPROM Config
57 u32 Config; // Code = 0 (extended), ExCode = 11 57 u32 Config; // Code = 0 (extended), ExCode = 11
58 u32 PadEx11[15]; 58 u32 PadEx11[15];
59 // ExCode 12 = Multicast bits 31:0 59 // ExCode 12 = Multicast bits 31:0
60 u32 McastLow; // Code = 0 (extended), ExCode = 12 60 u32 McastLow; // Code = 0 (extended), ExCode = 12
61 u32 PadEx12[15]; 61 u32 PadEx12[15];
62 // ExCode 13 = Multicast bits 63:32 62 // ExCode 13 = Multicast bits 63:32
63 u32 McastHigh; // Code = 0 (extended), ExCode = 13 63 u32 McastHigh; // Code = 0 (extended), ExCode = 13
64 u32 PadEx13[15]; 64 u32 PadEx13[15];
65 // ExCode 14 = Ping 65 // ExCode 14 = Ping
66 u32 Ping; // Code = 0 (extended), ExCode = 14 66 u32 Ping; // Code = 0 (extended), ExCode = 14
67 u32 PadEx14[15]; 67 u32 PadEx14[15];
68 // ExCode 15 = Link MTU 68 // ExCode 15 = Link MTU
69 u32 LinkMtu; // Code = 0 (extended), ExCode = 15 69 u32 LinkMtu; // Code = 0 (extended), ExCode = 15
70 u32 PadEx15[15]; 70 u32 PadEx15[15];
71 // ExCode 16 = Download synchronization 71 // ExCode 16 = Download synchronization
72 u32 LoadSync; // Code = 0 (extended), ExCode = 16 72 u32 LoadSync; // Code = 0 (extended), ExCode = 16
73 u32 PadEx16[15]; 73 u32 PadEx16[15];
74 // ExCode 17 = Upper DRAM address bits on 32-bit systems 74 // ExCode 17 = Upper DRAM address bits on 32-bit systems
75 u32 Upper; // Code = 0 (extended), ExCode = 17 75 u32 Upper; // Code = 0 (extended), ExCode = 17
76 u32 PadEx17[15]; 76 u32 PadEx17[15];
77 // ExCode 18 = Slowpath Send Index Address 77 // ExCode 18 = Slowpath Send Index Address
78 u32 SPSendIndex; // Code = 0 (extended), ExCode = 18 78 u32 SPSendIndex; // Code = 0 (extended), ExCode = 18
79 u32 PadEx18[15]; 79 u32 PadEx18[15];
80 u32 RsvdXF; // Code = 0 (extended), ExCode = 19 80 u32 RsvdXF; // Code = 0 (extended), ExCode = 19
81 u32 PadEx19[15]; 81 u32 PadEx19[15];
82 // ExCode 20 = Aggregation 82 // ExCode 20 = Aggregation
83 u32 Aggregation; // Code = 0 (extended), ExCode = 20 83 u32 Aggregation; // Code = 0 (extended), ExCode = 20
84 u32 PadEx20[15]; 84 u32 PadEx20[15];
85 // ExCode 21 = Receive MDL push timer 85 // ExCode 21 = Receive MDL push timer
86 u32 PushTicks; // Code = 0 (extended), ExCode = 21 86 u32 PushTicks; // Code = 0 (extended), ExCode = 21
87 u32 PadEx21[15]; 87 u32 PadEx21[15];
88 // ExCode 22 = TOE NA 88 // ExCode 22 = TOE NA
89 u32 AckFrequency; // Code = 0 (extended), ExCode = 22 89 u32 AckFrequency; // Code = 0 (extended), ExCode = 22
90 u32 PadEx22[15]; 90 u32 PadEx22[15];
91 // ExCode 23 = TOE NA 91 // ExCode 23 = TOE NA
92 u32 RsvdReg23; 92 u32 RsvdReg23;
93 u32 PadEx23[15]; 93 u32 PadEx23[15];
94 // ExCode 24 = TOE NA 94 // ExCode 24 = TOE NA
95 u32 RsvdReg24; 95 u32 RsvdReg24;
96 u32 PadEx24[15]; 96 u32 PadEx24[15];
97 // ExCode 25 = TOE NA 97 // ExCode 25 = TOE NA
98 u32 RsvdReg25; // Code = 0 (extended), ExCode = 25 98 u32 RsvdReg25; // Code = 0 (extended), ExCode = 25
99 u32 PadEx25[15]; 99 u32 PadEx25[15];
100 // ExCode 26 = Receive checksum requirements 100 // ExCode 26 = Receive checksum requirements
101 u32 ReceiveChecksum; // Code = 0 (extended), ExCode = 26 101 u32 ReceiveChecksum; // Code = 0 (extended), ExCode = 26
102 u32 PadEx26[15]; 102 u32 PadEx26[15];
103 // ExCode 27 = RSS Requirements 103 // ExCode 27 = RSS Requirements
104 u32 Rss; // Code = 0 (extended), ExCode = 27 104 u32 Rss; // Code = 0 (extended), ExCode = 27
105 u32 PadEx27[15]; 105 u32 PadEx27[15];
106 // ExCode 28 = RSS Table 106 // ExCode 28 = RSS Table
107 u32 RssTable; // Code = 0 (extended), ExCode = 28 107 u32 RssTable; // Code = 0 (extended), ExCode = 28
108 u32 PadEx28[15]; 108 u32 PadEx28[15];
109 // ExCode 29 = Event ring release entries 109 // ExCode 29 = Event ring release entries
110 u32 EventRelease; // Code = 0 (extended), ExCode = 29 110 u32 EventRelease; // Code = 0 (extended), ExCode = 29
111 u32 PadEx29[15]; 111 u32 PadEx29[15];
112 // ExCode 30 = Number of receive bufferlist commands on ring 0 112 // ExCode 30 = Number of receive bufferlist commands on ring 0
113 u32 RcvCmd; // Code = 0 (extended), ExCode = 30 113 u32 RcvCmd; // Code = 0 (extended), ExCode = 30
114 u32 PadEx30[15]; 114 u32 PadEx30[15];
115 // ExCode 31 = slowpath transmit command - Data[31:0] = 1 115 // ExCode 31 = slowpath transmit command - Data[31:0] = 1
116 u32 XmtCmd; // Code = 0 (extended), ExCode = 31 116 u32 XmtCmd; // Code = 0 (extended), ExCode = 31
117 u32 PadEx31[15]; 117 u32 PadEx31[15];
118 // ExCode 32 = Dump command 118 // ExCode 32 = Dump command
119 u32 DumpCmd; // Code = 0 (extended), ExCode = 32 119 u32 DumpCmd; // Code = 0 (extended), ExCode = 32
120 u32 PadEx32[15]; 120 u32 PadEx32[15];
121 // ExCode 33 = Debug command 121 // ExCode 33 = Debug command
122 u32 DebugCmd; // Code = 0 (extended), ExCode = 33 122 u32 DebugCmd; // Code = 0 (extended), ExCode = 33
123 u32 PadEx33[15]; 123 u32 PadEx33[15];
124 // There are 128 possible extended commands - each of account for 16 124 // There are 128 possible extended commands - each of account for 16
125 // words (including the non-relevent base command codes 1-15). 125 // words (including the non-relevent base command codes 1-15).
126 // Pad for the remainder of these here to bring us to the next CPU 126 // Pad for the remainder of these here to bring us to the next CPU
127 // base. As extended codes are added, reduce the first array value in 127 // base. As extended codes are added, reduce the first array value in
128 // the following field 128 // the following field
129 u32 PadToNextCpu[94][16]; // 94 = 128 - 34 (34 = Excodes 0 - 33) 129 u32 PadToNextCpu[94][16]; // 94 = 128 - 34 (34 = Excodes 0 - 33)
130} SXG_UCODE_REGS, *PSXG_UCODE_REGS; 130} SXG_UCODE_REGS, *PSXG_UCODE_REGS;
131 131
132// Interrupt control register (0) values 132// Interrupt control register (0) values
@@ -141,7 +141,7 @@ typedef struct _SXG_UCODE_REGS {
141 141
142// The Microcode supports up to 16 RSS queues 142// The Microcode supports up to 16 RSS queues
143#define SXG_MAX_RSS 16 143#define SXG_MAX_RSS 16
144#define SXG_MAX_RSS_TABLE_SIZE 256 // 256-byte max 144#define SXG_MAX_RSS_TABLE_SIZE 256 // 256-byte max
145 145
146#define SXG_RSS_TCP6 0x00000001 // RSS TCP over IPv6 146#define SXG_RSS_TCP6 0x00000001 // RSS TCP over IPv6
147#define SXG_RSS_TCP4 0x00000002 // RSS TCP over IPv4 147#define SXG_RSS_TCP4 0x00000002 // RSS TCP over IPv4
@@ -170,16 +170,16 @@ typedef struct _SXG_UCODE_REGS {
170 * SXG_UCODE_REGS definition above 170 * SXG_UCODE_REGS definition above
171 */ 171 */
172typedef struct _SXG_TCB_REGS { 172typedef struct _SXG_TCB_REGS {
173 u32 ExCode; /* Extended codes - see SXG_UCODE_REGS */ 173 u32 ExCode; /* Extended codes - see SXG_UCODE_REGS */
174 u32 Xmt; /* Code = 1 - # of Xmt descriptors added to ring */ 174 u32 Xmt; /* Code = 1 - # of Xmt descriptors added to ring */
175 u32 Rcv; /* Code = 2 - # of Rcv descriptors added to ring */ 175 u32 Rcv; /* Code = 2 - # of Rcv descriptors added to ring */
176 u32 Rsvd1; /* Code = 3 - TOE NA */ 176 u32 Rsvd1; /* Code = 3 - TOE NA */
177 u32 Rsvd2; /* Code = 4 - TOE NA */ 177 u32 Rsvd2; /* Code = 4 - TOE NA */
178 u32 Rsvd3; /* Code = 5 - TOE NA */ 178 u32 Rsvd3; /* Code = 5 - TOE NA */
179 u32 Invalid; /* Code = 6 - Reserved for "CardUp" see above */ 179 u32 Invalid; /* Code = 6 - Reserved for "CardUp" see above */
180 u32 Rsvd4; /* Code = 7 - TOE NA */ 180 u32 Rsvd4; /* Code = 7 - TOE NA */
181 u32 Rsvd5; /* Code = 8 - TOE NA */ 181 u32 Rsvd5; /* Code = 8 - TOE NA */
182 u32 Pad[7]; /* Codes 8-15 - Not used. */ 182 u32 Pad[7]; /* Codes 8-15 - Not used. */
183} SXG_TCB_REGS, *PSXG_TCB_REGS; 183} SXG_TCB_REGS, *PSXG_TCB_REGS;
184 184
185/*************************************************************************** 185/***************************************************************************
@@ -273,27 +273,27 @@ typedef struct _SXG_TCB_REGS {
273 */ 273 */
274#pragma pack(push, 1) 274#pragma pack(push, 1)
275typedef struct _SXG_EVENT { 275typedef struct _SXG_EVENT {
276 u32 Pad[1]; // not used 276 u32 Pad[1]; // not used
277 u32 SndUna; // SndUna value 277 u32 SndUna; // SndUna value
278 u32 Resid; // receive MDL resid 278 u32 Resid; // receive MDL resid
279 union { 279 union {
280 void * HostHandle; // Receive host handle 280 void *HostHandle; // Receive host handle
281 u32 Rsvd1; // TOE NA 281 u32 Rsvd1; // TOE NA
282 struct { 282 struct {
283 u32 NotUsed; 283 u32 NotUsed;
284 u32 Rsvd2; // TOE NA 284 u32 Rsvd2; // TOE NA
285 } Flush; 285 } Flush;
286 }; 286 };
287 u32 Toeplitz; // RSS Toeplitz hash 287 u32 Toeplitz; // RSS Toeplitz hash
288 union { 288 union {
289 ushort Rsvd3; // TOE NA 289 ushort Rsvd3; // TOE NA
290 ushort HdrOffset; // Slowpath 290 ushort HdrOffset; // Slowpath
291 }; 291 };
292 ushort Length; // 292 ushort Length; //
293 unsigned char Rsvd4; // TOE NA 293 unsigned char Rsvd4; // TOE NA
294 unsigned char Code; // Event code 294 unsigned char Code; // Event code
295 unsigned char CommandIndex; // New ring index 295 unsigned char CommandIndex; // New ring index
296 unsigned char Status; // Event status 296 unsigned char Status; // Event status
297} SXG_EVENT, *PSXG_EVENT; 297} SXG_EVENT, *PSXG_EVENT;
298#pragma pack(pop) 298#pragma pack(pop)
299 299
@@ -318,12 +318,12 @@ typedef struct _SXG_EVENT {
318// Event ring 318// Event ring
319// Size must be power of 2, between 128 and 16k 319// Size must be power of 2, between 128 and 16k
320#define EVENT_RING_SIZE 4096 // ?? 320#define EVENT_RING_SIZE 4096 // ??
321#define EVENT_RING_BATCH 16 // Hand entries back 16 at a time. 321#define EVENT_RING_BATCH 16 // Hand entries back 16 at a time.
322#define EVENT_BATCH_LIMIT 256 // Stop processing events after 256 (16 * 16) 322#define EVENT_BATCH_LIMIT 256 // Stop processing events after 256 (16 * 16)
323 323
324typedef struct _SXG_EVENT_RING { 324typedef struct _SXG_EVENT_RING {
325 SXG_EVENT Ring[EVENT_RING_SIZE]; 325 SXG_EVENT Ring[EVENT_RING_SIZE];
326}SXG_EVENT_RING, *PSXG_EVENT_RING; 326} SXG_EVENT_RING, *PSXG_EVENT_RING;
327 327
328/*************************************************************************** 328/***************************************************************************
329 * 329 *
@@ -341,7 +341,7 @@ typedef struct _SXG_EVENT_RING {
341#define SXG_TCB_PER_BUCKET 16 341#define SXG_TCB_PER_BUCKET 16
342#define SXG_TCB_BUCKET_MASK 0xFF0 // Bucket portion of TCB ID 342#define SXG_TCB_BUCKET_MASK 0xFF0 // Bucket portion of TCB ID
343#define SXG_TCB_ELEMENT_MASK 0x00F // Element within bucket 343#define SXG_TCB_ELEMENT_MASK 0x00F // Element within bucket
344#define SXG_TCB_BUCKETS 256 // 256 * 16 = 4k 344#define SXG_TCB_BUCKETS 256 // 256 * 16 = 4k
345 345
346#define SXG_TCB_BUFFER_SIZE 512 // ASSERT format is correct 346#define SXG_TCB_BUFFER_SIZE 512 // ASSERT format is correct
347 347
@@ -368,7 +368,6 @@ typedef struct _SXG_EVENT_RING {
368 &(_TcpObject)->CompBuffer->Frame.HasVlan.TcpIp6.Ip : \ 368 &(_TcpObject)->CompBuffer->Frame.HasVlan.TcpIp6.Ip : \
369 &(_TcpObject)->CompBuffer->Frame.NoVlan.TcpIp6.Ip 369 &(_TcpObject)->CompBuffer->Frame.NoVlan.TcpIp6.Ip
370 370
371
372#if DBG 371#if DBG
373// Horrible kludge to distinguish dumb-nic, slowpath, and 372// Horrible kludge to distinguish dumb-nic, slowpath, and
374// fastpath traffic. Decrement the HopLimit by one 373// fastpath traffic. Decrement the HopLimit by one
@@ -396,16 +395,16 @@ typedef struct _SXG_EVENT_RING {
396 * Receive and transmit rings 395 * Receive and transmit rings
397 ***************************************************************************/ 396 ***************************************************************************/
398#define SXG_MAX_RING_SIZE 256 397#define SXG_MAX_RING_SIZE 256
399#define SXG_XMT_RING_SIZE 128 // Start with 128 398#define SXG_XMT_RING_SIZE 128 // Start with 128
400#define SXG_RCV_RING_SIZE 128 // Start with 128 399#define SXG_RCV_RING_SIZE 128 // Start with 128
401#define SXG_MAX_ENTRIES 4096 400#define SXG_MAX_ENTRIES 4096
402 401
403// Structure and macros to manage a ring 402// Structure and macros to manage a ring
404typedef struct _SXG_RING_INFO { 403typedef struct _SXG_RING_INFO {
405 unsigned char Head; // Where we add entries - Note unsigned char:RING_SIZE 404 unsigned char Head; // Where we add entries - Note unsigned char:RING_SIZE
406 unsigned char Tail; // Where we pull off completed entries 405 unsigned char Tail; // Where we pull off completed entries
407 ushort Size; // Ring size - Must be multiple of 2 406 ushort Size; // Ring size - Must be multiple of 2
408 void * Context[SXG_MAX_RING_SIZE]; // Shadow ring 407 void *Context[SXG_MAX_RING_SIZE]; // Shadow ring
409} SXG_RING_INFO, *PSXG_RING_INFO; 408} SXG_RING_INFO, *PSXG_RING_INFO;
410 409
411#define SXG_INITIALIZE_RING(_ring, _size) { \ 410#define SXG_INITIALIZE_RING(_ring, _size) { \
@@ -483,40 +482,40 @@ typedef struct _SXG_RING_INFO {
483 */ 482 */
484#pragma pack(push, 1) 483#pragma pack(push, 1)
485typedef struct _SXG_CMD { 484typedef struct _SXG_CMD {
486 dma_addr_t Sgl; // Physical address of SGL 485 dma_addr_t Sgl; // Physical address of SGL
487 union { 486 union {
488 struct { 487 struct {
489 dma64_addr_t FirstSgeAddress;// Address of first SGE 488 dma64_addr_t FirstSgeAddress; // Address of first SGE
490 u32 FirstSgeLength; // Length of first SGE 489 u32 FirstSgeLength; // Length of first SGE
491 union { 490 union {
492 u32 Rsvd1; // TOE NA 491 u32 Rsvd1; // TOE NA
493 u32 SgeOffset; // Slowpath - 2nd SGE offset 492 u32 SgeOffset; // Slowpath - 2nd SGE offset
494 u32 Resid; // MDL completion - clobbers update 493 u32 Resid; // MDL completion - clobbers update
495 }; 494 };
496 union { 495 union {
497 u32 TotalLength; // Total transfer length 496 u32 TotalLength; // Total transfer length
498 u32 Mss; // LSO MSS 497 u32 Mss; // LSO MSS
499 }; 498 };
500 } Buffer; 499 } Buffer;
501 }; 500 };
502 union { 501 union {
503 struct { 502 struct {
504 unsigned char Flags:4; // slowpath flags 503 unsigned char Flags:4; // slowpath flags
505 unsigned char IpHl:4; // Ip header length (>>2) 504 unsigned char IpHl:4; // Ip header length (>>2)
506 unsigned char MacLen; // Mac header len 505 unsigned char MacLen; // Mac header len
507 } CsumFlags; 506 } CsumFlags;
508 struct { 507 struct {
509 ushort Flags:4; // slowpath flags 508 ushort Flags:4; // slowpath flags
510 ushort TcpHdrOff:7; // TCP 509 ushort TcpHdrOff:7; // TCP
511 ushort MacLen:5; // Mac header len 510 ushort MacLen:5; // Mac header len
512 } LsoFlags; 511 } LsoFlags;
513 ushort Flags; // flags 512 ushort Flags; // flags
514 }; 513 };
515 union { 514 union {
516 ushort SgEntries; // SG entry count including first sge 515 ushort SgEntries; // SG entry count including first sge
517 struct { 516 struct {
518 unsigned char Status; // Copied from event status 517 unsigned char Status; // Copied from event status
519 unsigned char NotUsed; 518 unsigned char NotUsed;
520 } Status; 519 } Status;
521 }; 520 };
522} SXG_CMD, *PSXG_CMD; 521} SXG_CMD, *PSXG_CMD;
@@ -524,8 +523,8 @@ typedef struct _SXG_CMD {
524 523
525#pragma pack(push, 1) 524#pragma pack(push, 1)
526typedef struct _VLAN_HDR { 525typedef struct _VLAN_HDR {
527 ushort VlanTci; 526 ushort VlanTci;
528 ushort VlanTpid; 527 ushort VlanTpid;
529} VLAN_HDR, *PVLAN_HDR; 528} VLAN_HDR, *PVLAN_HDR;
530#pragma pack(pop) 529#pragma pack(pop)
531 530
@@ -561,16 +560,16 @@ typedef struct _VLAN_HDR {
561 * 560 *
562 */ 561 */
563// Slowpath CMD flags 562// Slowpath CMD flags
564#define SXG_SLOWCMD_CSUM_IP 0x01 // Checksum IP 563#define SXG_SLOWCMD_CSUM_IP 0x01 // Checksum IP
565#define SXG_SLOWCMD_CSUM_TCP 0x02 // Checksum TCP 564#define SXG_SLOWCMD_CSUM_TCP 0x02 // Checksum TCP
566#define SXG_SLOWCMD_LSO 0x04 // Large segment send 565#define SXG_SLOWCMD_LSO 0x04 // Large segment send
567 566
568typedef struct _SXG_XMT_RING { 567typedef struct _SXG_XMT_RING {
569 SXG_CMD Descriptors[SXG_XMT_RING_SIZE]; 568 SXG_CMD Descriptors[SXG_XMT_RING_SIZE];
570} SXG_XMT_RING, *PSXG_XMT_RING; 569} SXG_XMT_RING, *PSXG_XMT_RING;
571 570
572typedef struct _SXG_RCV_RING { 571typedef struct _SXG_RCV_RING {
573 SXG_CMD Descriptors[SXG_RCV_RING_SIZE]; 572 SXG_CMD Descriptors[SXG_RCV_RING_SIZE];
574} SXG_RCV_RING, *PSXG_RCV_RING; 573} SXG_RCV_RING, *PSXG_RCV_RING;
575 574
576/*************************************************************************** 575/***************************************************************************
@@ -578,8 +577,8 @@ typedef struct _SXG_RCV_RING {
578 * shared memory allocation 577 * shared memory allocation
579 ***************************************************************************/ 578 ***************************************************************************/
580typedef enum { 579typedef enum {
581 SXG_BUFFER_TYPE_RCV, // Receive buffer 580 SXG_BUFFER_TYPE_RCV, // Receive buffer
582 SXG_BUFFER_TYPE_SGL // SGL buffer 581 SXG_BUFFER_TYPE_SGL // SGL buffer
583} SXG_BUFFER_TYPE; 582} SXG_BUFFER_TYPE;
584 583
585// State for SXG buffers 584// State for SXG buffers
@@ -668,60 +667,60 @@ typedef enum {
668#define SXG_RCV_DATA_BUFFERS 4096 // Amount to give to the card 667#define SXG_RCV_DATA_BUFFERS 4096 // Amount to give to the card
669#define SXG_INITIAL_RCV_DATA_BUFFERS 8192 // Initial pool of buffers 668#define SXG_INITIAL_RCV_DATA_BUFFERS 8192 // Initial pool of buffers
670#define SXG_MIN_RCV_DATA_BUFFERS 2048 // Minimum amount and when to get more 669#define SXG_MIN_RCV_DATA_BUFFERS 2048 // Minimum amount and when to get more
671#define SXG_MAX_RCV_BLOCKS 128 // = 16384 receive buffers 670#define SXG_MAX_RCV_BLOCKS 128 // = 16384 receive buffers
672 671
673// Receive buffer header 672// Receive buffer header
674typedef struct _SXG_RCV_DATA_BUFFER_HDR { 673typedef struct _SXG_RCV_DATA_BUFFER_HDR {
675 dma_addr_t PhysicalAddress; // Buffer physical address 674 dma_addr_t PhysicalAddress; // Buffer physical address
676 // Note - DO NOT USE the VirtualAddress field to locate data. 675 // Note - DO NOT USE the VirtualAddress field to locate data.
677 // Use the sxg.h:SXG_RECEIVE_DATA_LOCATION macro instead. 676 // Use the sxg.h:SXG_RECEIVE_DATA_LOCATION macro instead.
678 void *VirtualAddress; // Start of buffer 677 void *VirtualAddress; // Start of buffer
679 LIST_ENTRY FreeList; // Free queue of buffers 678 LIST_ENTRY FreeList; // Free queue of buffers
680 struct _SXG_RCV_DATA_BUFFER_HDR *Next; // Fastpath data buffer queue 679 struct _SXG_RCV_DATA_BUFFER_HDR *Next; // Fastpath data buffer queue
681 u32 Size; // Buffer size 680 u32 Size; // Buffer size
682 u32 ByteOffset; // See SXG_RESTORE_MDL_OFFSET 681 u32 ByteOffset; // See SXG_RESTORE_MDL_OFFSET
683 unsigned char State; // See SXG_BUFFER state above 682 unsigned char State; // See SXG_BUFFER state above
684 unsigned char Status; // Event status (to log PUSH) 683 unsigned char Status; // Event status (to log PUSH)
685 struct sk_buff * skb; // Double mapped (nbl and pkt) 684 struct sk_buff *skb; // Double mapped (nbl and pkt)
686} SXG_RCV_DATA_BUFFER_HDR, *PSXG_RCV_DATA_BUFFER_HDR; 685} SXG_RCV_DATA_BUFFER_HDR, *PSXG_RCV_DATA_BUFFER_HDR;
687 686
688// SxgSlowReceive uses the PACKET (skb) contained 687// SxgSlowReceive uses the PACKET (skb) contained
689// in the SXG_RCV_DATA_BUFFER_HDR when indicating dumb-nic data 688// in the SXG_RCV_DATA_BUFFER_HDR when indicating dumb-nic data
690#define SxgDumbRcvPacket skb 689#define SxgDumbRcvPacket skb
691 690
692#define SXG_RCV_DATA_HDR_SIZE 256 // Space for SXG_RCV_DATA_BUFFER_HDR 691#define SXG_RCV_DATA_HDR_SIZE 256 // Space for SXG_RCV_DATA_BUFFER_HDR
693#define SXG_RCV_DATA_BUFFER_SIZE 2048 // Non jumbo = 2k including HDR 692#define SXG_RCV_DATA_BUFFER_SIZE 2048 // Non jumbo = 2k including HDR
694#define SXG_RCV_JUMBO_BUFFER_SIZE 10240 // jumbo = 10k including HDR 693#define SXG_RCV_JUMBO_BUFFER_SIZE 10240 // jumbo = 10k including HDR
695 694
696// Receive data descriptor 695// Receive data descriptor
697typedef struct _SXG_RCV_DATA_DESCRIPTOR { 696typedef struct _SXG_RCV_DATA_DESCRIPTOR {
698 union { 697 union {
699 struct sk_buff * VirtualAddress; // Host handle 698 struct sk_buff *VirtualAddress; // Host handle
700 u64 ForceTo8Bytes; // Force x86 to 8-byte boundary 699 u64 ForceTo8Bytes; // Force x86 to 8-byte boundary
701 }; 700 };
702 dma_addr_t PhysicalAddress; 701 dma_addr_t PhysicalAddress;
703} SXG_RCV_DATA_DESCRIPTOR, *PSXG_RCV_DATA_DESCRIPTOR; 702} SXG_RCV_DATA_DESCRIPTOR, *PSXG_RCV_DATA_DESCRIPTOR;
704 703
705// Receive descriptor block 704// Receive descriptor block
706#define SXG_RCV_DESCRIPTORS_PER_BLOCK 128 705#define SXG_RCV_DESCRIPTORS_PER_BLOCK 128
707#define SXG_RCV_DESCRIPTOR_BLOCK_SIZE 2048 // For sanity check 706#define SXG_RCV_DESCRIPTOR_BLOCK_SIZE 2048 // For sanity check
708typedef struct _SXG_RCV_DESCRIPTOR_BLOCK { 707typedef struct _SXG_RCV_DESCRIPTOR_BLOCK {
709 SXG_RCV_DATA_DESCRIPTOR Descriptors[SXG_RCV_DESCRIPTORS_PER_BLOCK]; 708 SXG_RCV_DATA_DESCRIPTOR Descriptors[SXG_RCV_DESCRIPTORS_PER_BLOCK];
710} SXG_RCV_DESCRIPTOR_BLOCK, *PSXG_RCV_DESCRIPTOR_BLOCK; 709} SXG_RCV_DESCRIPTOR_BLOCK, *PSXG_RCV_DESCRIPTOR_BLOCK;
711 710
712// Receive descriptor block header 711// Receive descriptor block header
713typedef struct _SXG_RCV_DESCRIPTOR_BLOCK_HDR { 712typedef struct _SXG_RCV_DESCRIPTOR_BLOCK_HDR {
714 void * VirtualAddress; // Start of 2k buffer 713 void *VirtualAddress; // Start of 2k buffer
715 dma_addr_t PhysicalAddress; // ..and it's physical address 714 dma_addr_t PhysicalAddress; // ..and it's physical address
716 LIST_ENTRY FreeList; // Free queue of descriptor blocks 715 LIST_ENTRY FreeList; // Free queue of descriptor blocks
717 unsigned char State; // See SXG_BUFFER state above 716 unsigned char State; // See SXG_BUFFER state above
718} SXG_RCV_DESCRIPTOR_BLOCK_HDR, *PSXG_RCV_DESCRIPTOR_BLOCK_HDR; 717} SXG_RCV_DESCRIPTOR_BLOCK_HDR, *PSXG_RCV_DESCRIPTOR_BLOCK_HDR;
719 718
720// Receive block header 719// Receive block header
721typedef struct _SXG_RCV_BLOCK_HDR { 720typedef struct _SXG_RCV_BLOCK_HDR {
722 void * VirtualAddress; // Start of virtual memory 721 void *VirtualAddress; // Start of virtual memory
723 dma_addr_t PhysicalAddress; // ..and it's physical address 722 dma_addr_t PhysicalAddress; // ..and it's physical address
724 LIST_ENTRY AllList; // Queue of all SXG_RCV_BLOCKS 723 LIST_ENTRY AllList; // Queue of all SXG_RCV_BLOCKS
725} SXG_RCV_BLOCK_HDR, *PSXG_RCV_BLOCK_HDR; 724} SXG_RCV_BLOCK_HDR, *PSXG_RCV_BLOCK_HDR;
726 725
727// Macros to determine data structure offsets into receive block 726// Macros to determine data structure offsets into receive block
@@ -747,8 +746,8 @@ typedef struct _SXG_RCV_BLOCK_HDR {
747// Use the miniport reserved portion of the NBL to locate 746// Use the miniport reserved portion of the NBL to locate
748// our SXG_RCV_DATA_BUFFER_HDR structure. 747// our SXG_RCV_DATA_BUFFER_HDR structure.
749typedef struct _SXG_RCV_NBL_RESERVED { 748typedef struct _SXG_RCV_NBL_RESERVED {
750 PSXG_RCV_DATA_BUFFER_HDR RcvDataBufferHdr; 749 PSXG_RCV_DATA_BUFFER_HDR RcvDataBufferHdr;
751 void * Available; 750 void *Available;
752} SXG_RCV_NBL_RESERVED, *PSXG_RCV_NBL_RESERVED; 751} SXG_RCV_NBL_RESERVED, *PSXG_RCV_NBL_RESERVED;
753 752
754#define SXG_RCV_NBL_BUFFER_HDR(_NBL) (((PSXG_RCV_NBL_RESERVED)NET_BUFFER_LIST_MINIPORT_RESERVED(_NBL))->RcvDataBufferHdr) 753#define SXG_RCV_NBL_BUFFER_HDR(_NBL) (((PSXG_RCV_NBL_RESERVED)NET_BUFFER_LIST_MINIPORT_RESERVED(_NBL))->RcvDataBufferHdr)
@@ -760,12 +759,11 @@ typedef struct _SXG_RCV_NBL_RESERVED {
760#define SXG_MIN_SGL_BUFFERS 2048 // Minimum amount and when to get more 759#define SXG_MIN_SGL_BUFFERS 2048 // Minimum amount and when to get more
761#define SXG_MAX_SGL_BUFFERS 16384 // Maximum to allocate (note ADAPT:ushort) 760#define SXG_MAX_SGL_BUFFERS 16384 // Maximum to allocate (note ADAPT:ushort)
762 761
763
764// Self identifying structure type 762// Self identifying structure type
765typedef enum _SXG_SGL_TYPE { 763typedef enum _SXG_SGL_TYPE {
766 SXG_SGL_DUMB, // Dumb NIC SGL 764 SXG_SGL_DUMB, // Dumb NIC SGL
767 SXG_SGL_SLOW, // Slowpath protocol header - see below 765 SXG_SGL_SLOW, // Slowpath protocol header - see below
768 SXG_SGL_CHIMNEY // Chimney offload SGL 766 SXG_SGL_CHIMNEY // Chimney offload SGL
769} SXG_SGL_TYPE, PSXG_SGL_TYPE; 767} SXG_SGL_TYPE, PSXG_SGL_TYPE;
770 768
771// Note - the description below is Microsoft specific 769// Note - the description below is Microsoft specific
@@ -774,14 +772,14 @@ typedef enum _SXG_SGL_TYPE {
774// for the SCATTER_GATHER_LIST portion of the SXG_SCATTER_GATHER data structure. 772// for the SCATTER_GATHER_LIST portion of the SXG_SCATTER_GATHER data structure.
775// The following considerations apply when setting this value: 773// The following considerations apply when setting this value:
776// - First, the Sahara card is designed to read the Microsoft SGL structure 774// - First, the Sahara card is designed to read the Microsoft SGL structure
777// straight out of host memory. This means that the SGL must reside in 775// straight out of host memory. This means that the SGL must reside in
778// shared memory. If the length here is smaller than the SGL for the 776// shared memory. If the length here is smaller than the SGL for the
779// NET_BUFFER, then NDIS will allocate its own buffer. The buffer 777// NET_BUFFER, then NDIS will allocate its own buffer. The buffer
780// that NDIS allocates is not in shared memory, so when this happens, 778// that NDIS allocates is not in shared memory, so when this happens,
781// the SGL will need to be copied to a set of SXG_SCATTER_GATHER buffers. 779// the SGL will need to be copied to a set of SXG_SCATTER_GATHER buffers.
782// In other words.. we don't want this value to be too small. 780// In other words.. we don't want this value to be too small.
783// - On the other hand.. we're allocating up to 16k of these things. If 781// - On the other hand.. we're allocating up to 16k of these things. If
784// we make this too big, we start to consume a ton of memory.. 782// we make this too big, we start to consume a ton of memory..
785// At the moment, I'm going to limit the number of SG entries to 150. 783// At the moment, I'm going to limit the number of SG entries to 150.
786// If each entry maps roughly 4k, then this should cover roughly 600kB 784// If each entry maps roughly 4k, then this should cover roughly 600kB
787// NET_BUFFERs. Furthermore, since each entry is 24 bytes, the total 785// NET_BUFFERs. Furthermore, since each entry is 24 bytes, the total
@@ -801,24 +799,23 @@ typedef enum _SXG_SGL_TYPE {
801// the SGL. The following structure defines an x64 799// the SGL. The following structure defines an x64
802// formatted SGL entry 800// formatted SGL entry
803typedef struct _SXG_X64_SGE { 801typedef struct _SXG_X64_SGE {
804 dma64_addr_t Address; // same as wdm.h 802 dma64_addr_t Address; // same as wdm.h
805 u32 Length; // same as wdm.h 803 u32 Length; // same as wdm.h
806 u32 CompilerPad;// The compiler pads to 8-bytes 804 u32 CompilerPad; // The compiler pads to 8-bytes
807 u64 Reserved; // u32 * in wdm.h. Force to 8 bytes 805 u64 Reserved; // u32 * in wdm.h. Force to 8 bytes
808} SXG_X64_SGE, *PSXG_X64_SGE; 806} SXG_X64_SGE, *PSXG_X64_SGE;
809 807
810typedef struct _SCATTER_GATHER_ELEMENT { 808typedef struct _SCATTER_GATHER_ELEMENT {
811 dma64_addr_t Address; // same as wdm.h 809 dma64_addr_t Address; // same as wdm.h
812 u32 Length; // same as wdm.h 810 u32 Length; // same as wdm.h
813 u32 CompilerPad;// The compiler pads to 8-bytes 811 u32 CompilerPad; // The compiler pads to 8-bytes
814 u64 Reserved; // u32 * in wdm.h. Force to 8 bytes 812 u64 Reserved; // u32 * in wdm.h. Force to 8 bytes
815} SCATTER_GATHER_ELEMENT, *PSCATTER_GATHER_ELEMENT; 813} SCATTER_GATHER_ELEMENT, *PSCATTER_GATHER_ELEMENT;
816 814
817
818typedef struct _SCATTER_GATHER_LIST { 815typedef struct _SCATTER_GATHER_LIST {
819 u32 NumberOfElements; 816 u32 NumberOfElements;
820 u32 * Reserved; 817 u32 *Reserved;
821 SCATTER_GATHER_ELEMENT Elements[]; 818 SCATTER_GATHER_ELEMENT Elements[];
822} SCATTER_GATHER_LIST, *PSCATTER_GATHER_LIST; 819} SCATTER_GATHER_LIST, *PSCATTER_GATHER_LIST;
823 820
824// The card doesn't care about anything except elements, so 821// The card doesn't care about anything except elements, so
@@ -826,26 +823,26 @@ typedef struct _SCATTER_GATHER_LIST {
826// SGL structure. But redefine from wdm.h:SCATTER_GATHER_LIST so 823// SGL structure. But redefine from wdm.h:SCATTER_GATHER_LIST so
827// we can specify SXG_X64_SGE and define a fixed number of elements 824// we can specify SXG_X64_SGE and define a fixed number of elements
828typedef struct _SXG_X64_SGL { 825typedef struct _SXG_X64_SGL {
829 u32 NumberOfElements; 826 u32 NumberOfElements;
830 u32 * Reserved; 827 u32 *Reserved;
831 SXG_X64_SGE Elements[SXG_SGL_ENTRIES]; 828 SXG_X64_SGE Elements[SXG_SGL_ENTRIES];
832} SXG_X64_SGL, *PSXG_X64_SGL; 829} SXG_X64_SGL, *PSXG_X64_SGL;
833 830
834typedef struct _SXG_SCATTER_GATHER { 831typedef struct _SXG_SCATTER_GATHER {
835 SXG_SGL_TYPE Type; // FIRST! Dumb-nic or offload 832 SXG_SGL_TYPE Type; // FIRST! Dumb-nic or offload
836 void * adapter; // Back pointer to adapter 833 void *adapter; // Back pointer to adapter
837 LIST_ENTRY FreeList; // Free SXG_SCATTER_GATHER blocks 834 LIST_ENTRY FreeList; // Free SXG_SCATTER_GATHER blocks
838 LIST_ENTRY AllList; // All SXG_SCATTER_GATHER blocks 835 LIST_ENTRY AllList; // All SXG_SCATTER_GATHER blocks
839 dma_addr_t PhysicalAddress;// physical address 836 dma_addr_t PhysicalAddress; // physical address
840 unsigned char State; // See SXG_BUFFER state above 837 unsigned char State; // See SXG_BUFFER state above
841 unsigned char CmdIndex; // Command ring index 838 unsigned char CmdIndex; // Command ring index
842 struct sk_buff * DumbPacket; // Associated Packet 839 struct sk_buff *DumbPacket; // Associated Packet
843 u32 Direction; // For asynchronous completions 840 u32 Direction; // For asynchronous completions
844 u32 CurOffset; // Current SGL offset 841 u32 CurOffset; // Current SGL offset
845 u32 SglRef; // SGL reference count 842 u32 SglRef; // SGL reference count
846 VLAN_HDR VlanTag; // VLAN tag to be inserted into SGL 843 VLAN_HDR VlanTag; // VLAN tag to be inserted into SGL
847 PSCATTER_GATHER_LIST pSgl; // SGL Addr. Possibly &Sgl 844 PSCATTER_GATHER_LIST pSgl; // SGL Addr. Possibly &Sgl
848 SXG_X64_SGL Sgl; // SGL handed to card 845 SXG_X64_SGL Sgl; // SGL handed to card
849} SXG_SCATTER_GATHER, *PSXG_SCATTER_GATHER; 846} SXG_SCATTER_GATHER, *PSXG_SCATTER_GATHER;
850 847
851#if defined(CONFIG_X86_64) 848#if defined(CONFIG_X86_64)
@@ -856,6 +853,5 @@ typedef struct _SXG_SCATTER_GATHER {
856#define SXG_SGL_BUFFER(_SxgSgl) NULL 853#define SXG_SGL_BUFFER(_SxgSgl) NULL
857#define SXG_SGL_BUF_SIZE 0 854#define SXG_SGL_BUF_SIZE 0
858#else 855#else
859 Stop Compilation; 856Stop Compilation;
860#endif 857#endif
861
diff --git a/drivers/staging/sxg/sxghw.h b/drivers/staging/sxg/sxghw.h
index 8f4f6effdd98..2222ae91fd97 100644
--- a/drivers/staging/sxg/sxghw.h
+++ b/drivers/staging/sxg/sxghw.h
@@ -13,11 +13,11 @@
13/******************************************************************************* 13/*******************************************************************************
14 * Configuration space 14 * Configuration space
15 *******************************************************************************/ 15 *******************************************************************************/
16// PCI Vendor ID 16/* PCI Vendor ID */
17#define SXG_VENDOR_ID 0x139A // Alacritech's Vendor ID 17#define SXG_VENDOR_ID 0x139A /* Alacritech's Vendor ID */
18 18
19// PCI Device ID 19// PCI Device ID
20#define SXG_DEVICE_ID 0x0009 // Sahara Device ID 20#define SXG_DEVICE_ID 0x0009 /* Sahara Device ID */
21 21
22// 22//
23// Subsystem IDs. 23// Subsystem IDs.
@@ -141,7 +141,7 @@ typedef struct _SXG_HW_REGS {
141#define SXG_REGISTER_SIZE_PER_CPU 0x00002000 // Used to sanity check UCODE_REGS structure 141#define SXG_REGISTER_SIZE_PER_CPU 0x00002000 // Used to sanity check UCODE_REGS structure
142 142
143// Sahara receive sequencer status values 143// Sahara receive sequencer status values
144#define SXG_RCV_STATUS_ATTN 0x80000000 // Attention 144#define SXG_RCV_STATUS_ATTN 0x80000000 // Attention
145#define SXG_RCV_STATUS_TRANSPORT_MASK 0x3F000000 // Transport mask 145#define SXG_RCV_STATUS_TRANSPORT_MASK 0x3F000000 // Transport mask
146#define SXG_RCV_STATUS_TRANSPORT_ERROR 0x20000000 // Transport error 146#define SXG_RCV_STATUS_TRANSPORT_ERROR 0x20000000 // Transport error
147#define SXG_RCV_STATUS_TRANSPORT_CSUM 0x23000000 // Transport cksum error 147#define SXG_RCV_STATUS_TRANSPORT_CSUM 0x23000000 // Transport cksum error
@@ -156,9 +156,9 @@ typedef struct _SXG_HW_REGS {
156#define SXG_RCV_STATUS_TRANSPORT_FTP 0x03000000 // Transport FTP 156#define SXG_RCV_STATUS_TRANSPORT_FTP 0x03000000 // Transport FTP
157#define SXG_RCV_STATUS_TRANSPORT_HTTP 0x02000000 // Transport HTTP 157#define SXG_RCV_STATUS_TRANSPORT_HTTP 0x02000000 // Transport HTTP
158#define SXG_RCV_STATUS_TRANSPORT_SMB 0x01000000 // Transport SMB 158#define SXG_RCV_STATUS_TRANSPORT_SMB 0x01000000 // Transport SMB
159#define SXG_RCV_STATUS_NETWORK_MASK 0x00FF0000 // Network mask 159#define SXG_RCV_STATUS_NETWORK_MASK 0x00FF0000 // Network mask
160#define SXG_RCV_STATUS_NETWORK_ERROR 0x00800000 // Network error 160#define SXG_RCV_STATUS_NETWORK_ERROR 0x00800000 // Network error
161#define SXG_RCV_STATUS_NETWORK_CSUM 0x00830000 // Network cksum error 161#define SXG_RCV_STATUS_NETWORK_CSUM 0x00830000 // Network cksum error
162#define SXG_RCV_STATUS_NETWORK_UFLOW 0x00820000 // Network underflow error 162#define SXG_RCV_STATUS_NETWORK_UFLOW 0x00820000 // Network underflow error
163#define SXG_RCV_STATUS_NETWORK_HDRLEN 0x00800000 // Network header length 163#define SXG_RCV_STATUS_NETWORK_HDRLEN 0x00800000 // Network header length
164#define SXG_RCV_STATUS_NETWORK_OFLOW 0x00400000 // Network overflow detected 164#define SXG_RCV_STATUS_NETWORK_OFLOW 0x00400000 // Network overflow detected
@@ -167,67 +167,67 @@ typedef struct _SXG_HW_REGS {
167#define SXG_RCV_STATUS_NETWORK_OFFSET 0x00080000 // Network offset detected 167#define SXG_RCV_STATUS_NETWORK_OFFSET 0x00080000 // Network offset detected
168#define SXG_RCV_STATUS_NETWORK_FRAGMENT 0x00040000 // Network fragment detected 168#define SXG_RCV_STATUS_NETWORK_FRAGMENT 0x00040000 // Network fragment detected
169#define SXG_RCV_STATUS_NETWORK_TRANS_MASK 0x00030000 // Network transport type mask 169#define SXG_RCV_STATUS_NETWORK_TRANS_MASK 0x00030000 // Network transport type mask
170#define SXG_RCV_STATUS_NETWORK_UDP 0x00020000 // UDP 170#define SXG_RCV_STATUS_NETWORK_UDP 0x00020000 // UDP
171#define SXG_RCV_STATUS_NETWORK_TCP 0x00010000 // TCP 171#define SXG_RCV_STATUS_NETWORK_TCP 0x00010000 // TCP
172#define SXG_RCV_STATUS_IPONLY 0x00008000 // IP-only not TCP 172#define SXG_RCV_STATUS_IPONLY 0x00008000 // IP-only not TCP
173#define SXG_RCV_STATUS_PKT_PRI 0x00006000 // Receive priority 173#define SXG_RCV_STATUS_PKT_PRI 0x00006000 // Receive priority
174#define SXG_RCV_STATUS_PKT_PRI_SHFT 13 // Receive priority shift 174#define SXG_RCV_STATUS_PKT_PRI_SHFT 13 // Receive priority shift
175#define SXG_RCV_STATUS_PARITY 0x00001000 // MAC Receive RAM parity error 175#define SXG_RCV_STATUS_PARITY 0x00001000 // MAC Receive RAM parity error
176#define SXG_RCV_STATUS_ADDRESS_MASK 0x00000F00 // Link address detection mask 176#define SXG_RCV_STATUS_ADDRESS_MASK 0x00000F00 // Link address detection mask
177#define SXG_RCV_STATUS_ADDRESS_D 0x00000B00 // Link address D 177#define SXG_RCV_STATUS_ADDRESS_D 0x00000B00 // Link address D
178#define SXG_RCV_STATUS_ADDRESS_C 0x00000A00 // Link address C 178#define SXG_RCV_STATUS_ADDRESS_C 0x00000A00 // Link address C
179#define SXG_RCV_STATUS_ADDRESS_B 0x00000900 // Link address B 179#define SXG_RCV_STATUS_ADDRESS_B 0x00000900 // Link address B
180#define SXG_RCV_STATUS_ADDRESS_A 0x00000800 // Link address A 180#define SXG_RCV_STATUS_ADDRESS_A 0x00000800 // Link address A
181#define SXG_RCV_STATUS_ADDRESS_BCAST 0x00000300 // Link address broadcast 181#define SXG_RCV_STATUS_ADDRESS_BCAST 0x00000300 // Link address broadcast
182#define SXG_RCV_STATUS_ADDRESS_MCAST 0x00000200 // Link address multicast 182#define SXG_RCV_STATUS_ADDRESS_MCAST 0x00000200 // Link address multicast
183#define SXG_RCV_STATUS_ADDRESS_CMCAST 0x00000100 // Link control multicast 183#define SXG_RCV_STATUS_ADDRESS_CMCAST 0x00000100 // Link control multicast
184#define SXG_RCV_STATUS_LINK_MASK 0x000000FF // Link status mask 184#define SXG_RCV_STATUS_LINK_MASK 0x000000FF // Link status mask
185#define SXG_RCV_STATUS_LINK_ERROR 0x00000080 // Link error 185#define SXG_RCV_STATUS_LINK_ERROR 0x00000080 // Link error
186#define SXG_RCV_STATUS_LINK_MASK 0x000000FF // Link status mask 186#define SXG_RCV_STATUS_LINK_MASK 0x000000FF // Link status mask
187#define SXG_RCV_STATUS_LINK_PARITY 0x00000087 // RcvMacQ parity error 187#define SXG_RCV_STATUS_LINK_PARITY 0x00000087 // RcvMacQ parity error
188#define SXG_RCV_STATUS_LINK_EARLY 0x00000086 // Data early 188#define SXG_RCV_STATUS_LINK_EARLY 0x00000086 // Data early
189#define SXG_RCV_STATUS_LINK_BUFOFLOW 0x00000085 // Buffer overflow 189#define SXG_RCV_STATUS_LINK_BUFOFLOW 0x00000085 // Buffer overflow
190#define SXG_RCV_STATUS_LINK_CODE 0x00000084 // Link code error 190#define SXG_RCV_STATUS_LINK_CODE 0x00000084 // Link code error
191#define SXG_RCV_STATUS_LINK_DRIBBLE 0x00000083 // Dribble nibble 191#define SXG_RCV_STATUS_LINK_DRIBBLE 0x00000083 // Dribble nibble
192#define SXG_RCV_STATUS_LINK_CRC 0x00000082 // CRC error 192#define SXG_RCV_STATUS_LINK_CRC 0x00000082 // CRC error
193#define SXG_RCV_STATUS_LINK_OFLOW 0x00000081 // Link overflow 193#define SXG_RCV_STATUS_LINK_OFLOW 0x00000081 // Link overflow
194#define SXG_RCV_STATUS_LINK_UFLOW 0x00000080 // Link underflow 194#define SXG_RCV_STATUS_LINK_UFLOW 0x00000080 // Link underflow
195#define SXG_RCV_STATUS_LINK_8023 0x00000020 // 802.3 195#define SXG_RCV_STATUS_LINK_8023 0x00000020 // 802.3
196#define SXG_RCV_STATUS_LINK_SNAP 0x00000010 // Snap 196#define SXG_RCV_STATUS_LINK_SNAP 0x00000010 // Snap
197#define SXG_RCV_STATUS_LINK_VLAN 0x00000008 // VLAN 197#define SXG_RCV_STATUS_LINK_VLAN 0x00000008 // VLAN
198#define SXG_RCV_STATUS_LINK_TYPE_MASK 0x00000007 // Network type mask 198#define SXG_RCV_STATUS_LINK_TYPE_MASK 0x00000007 // Network type mask
199#define SXG_RCV_STATUS_LINK_CONTROL 0x00000003 // Control packet 199#define SXG_RCV_STATUS_LINK_CONTROL 0x00000003 // Control packet
200#define SXG_RCV_STATUS_LINK_IPV6 0x00000002 // IPv6 packet 200#define SXG_RCV_STATUS_LINK_IPV6 0x00000002 // IPv6 packet
201#define SXG_RCV_STATUS_LINK_IPV4 0x00000001 // IPv4 packet 201#define SXG_RCV_STATUS_LINK_IPV4 0x00000001 // IPv4 packet
202 202
203/*************************************************************************** 203/***************************************************************************
204 * Sahara receive and transmit configuration registers 204 * Sahara receive and transmit configuration registers
205 ***************************************************************************/ 205 ***************************************************************************/
206#define RCV_CONFIG_RESET 0x80000000 // RcvConfig register reset 206#define RCV_CONFIG_RESET 0x80000000 // RcvConfig register reset
207#define RCV_CONFIG_ENABLE 0x40000000 // Enable the receive logic 207#define RCV_CONFIG_ENABLE 0x40000000 // Enable the receive logic
208#define RCV_CONFIG_ENPARSE 0x20000000 // Enable the receive parser 208#define RCV_CONFIG_ENPARSE 0x20000000 // Enable the receive parser
209#define RCV_CONFIG_SOCKET 0x10000000 // Enable the socket detector 209#define RCV_CONFIG_SOCKET 0x10000000 // Enable the socket detector
210#define RCV_CONFIG_RCVBAD 0x08000000 // Receive all bad frames 210#define RCV_CONFIG_RCVBAD 0x08000000 // Receive all bad frames
211#define RCV_CONFIG_CONTROL 0x04000000 // Receive all control frames 211#define RCV_CONFIG_CONTROL 0x04000000 // Receive all control frames
212#define RCV_CONFIG_RCVPAUSE 0x02000000 // Enable pause transmit when attn 212#define RCV_CONFIG_RCVPAUSE 0x02000000 // Enable pause transmit when attn
213#define RCV_CONFIG_TZIPV6 0x01000000 // Include TCP port w/ IPv6 toeplitz 213#define RCV_CONFIG_TZIPV6 0x01000000 // Include TCP port w/ IPv6 toeplitz
214#define RCV_CONFIG_TZIPV4 0x00800000 // Include TCP port w/ IPv4 toeplitz 214#define RCV_CONFIG_TZIPV4 0x00800000 // Include TCP port w/ IPv4 toeplitz
215#define RCV_CONFIG_FLUSH 0x00400000 // Flush buffers 215#define RCV_CONFIG_FLUSH 0x00400000 // Flush buffers
216#define RCV_CONFIG_PRIORITY_MASK 0x00300000 // Priority level 216#define RCV_CONFIG_PRIORITY_MASK 0x00300000 // Priority level
217#define RCV_CONFIG_HASH_MASK 0x00030000 // Hash depth 217#define RCV_CONFIG_HASH_MASK 0x00030000 // Hash depth
218#define RCV_CONFIG_HASH_8 0x00000000 // Hash depth 8 218#define RCV_CONFIG_HASH_8 0x00000000 // Hash depth 8
219#define RCV_CONFIG_HASH_16 0x00010000 // Hash depth 16 219#define RCV_CONFIG_HASH_16 0x00010000 // Hash depth 16
220#define RCV_CONFIG_HASH_4 0x00020000 // Hash depth 4 220#define RCV_CONFIG_HASH_4 0x00020000 // Hash depth 4
221#define RCV_CONFIG_HASH_2 0x00030000 // Hash depth 2 221#define RCV_CONFIG_HASH_2 0x00030000 // Hash depth 2
222#define RCV_CONFIG_BUFLEN_MASK 0x0000FFF0 // Buffer length bits 15:4. ie multiple of 16. 222#define RCV_CONFIG_BUFLEN_MASK 0x0000FFF0 // Buffer length bits 15:4. ie multiple of 16.
223#define RCV_CONFIG_SKT_DIS 0x00000008 // Disable socket detection on attn 223#define RCV_CONFIG_SKT_DIS 0x00000008 // Disable socket detection on attn
224// Macro to determine RCV_CONFIG_BUFLEN based on maximum frame size. 224// Macro to determine RCV_CONFIG_BUFLEN based on maximum frame size.
225// We add 18 bytes for Sahara receive status and padding, plus 4 bytes for CRC, 225// We add 18 bytes for Sahara receive status and padding, plus 4 bytes for CRC,
226// and round up to nearest 16 byte boundary 226// and round up to nearest 16 byte boundary
227#define RCV_CONFIG_BUFSIZE(_MaxFrame) ((((_MaxFrame) + 22) + 15) & RCV_CONFIG_BUFLEN_MASK) 227#define RCV_CONFIG_BUFSIZE(_MaxFrame) ((((_MaxFrame) + 22) + 15) & RCV_CONFIG_BUFLEN_MASK)
228 228
229#define XMT_CONFIG_RESET 0x80000000 // XmtConfig register reset 229#define XMT_CONFIG_RESET 0x80000000 // XmtConfig register reset
230#define XMT_CONFIG_ENABLE 0x40000000 // Enable transmit logic 230#define XMT_CONFIG_ENABLE 0x40000000 // Enable transmit logic
231#define XMT_CONFIG_MAC_PARITY 0x20000000 // Inhibit MAC RAM parity error 231#define XMT_CONFIG_MAC_PARITY 0x20000000 // Inhibit MAC RAM parity error
232#define XMT_CONFIG_BUF_PARITY 0x10000000 // Inhibit D2F buffer parity error 232#define XMT_CONFIG_BUF_PARITY 0x10000000 // Inhibit D2F buffer parity error
233#define XMT_CONFIG_MEM_PARITY 0x08000000 // Inhibit 1T SRAM parity error 233#define XMT_CONFIG_MEM_PARITY 0x08000000 // Inhibit 1T SRAM parity error
@@ -249,9 +249,9 @@ typedef struct _SXG_HW_REGS {
249 249
250// A-XGMAC Configuration Register 1 250// A-XGMAC Configuration Register 1
251#define AXGMAC_CFG1_XMT_PAUSE 0x80000000 // Allow the sending of Pause frames 251#define AXGMAC_CFG1_XMT_PAUSE 0x80000000 // Allow the sending of Pause frames
252#define AXGMAC_CFG1_XMT_EN 0x40000000 // Enable transmit 252#define AXGMAC_CFG1_XMT_EN 0x40000000 // Enable transmit
253#define AXGMAC_CFG1_RCV_PAUSE 0x20000000 // Allow the detection of Pause frames 253#define AXGMAC_CFG1_RCV_PAUSE 0x20000000 // Allow the detection of Pause frames
254#define AXGMAC_CFG1_RCV_EN 0x10000000 // Enable receive 254#define AXGMAC_CFG1_RCV_EN 0x10000000 // Enable receive
255#define AXGMAC_CFG1_XMT_STATE 0x04000000 // Current transmit state - READ ONLY 255#define AXGMAC_CFG1_XMT_STATE 0x04000000 // Current transmit state - READ ONLY
256#define AXGMAC_CFG1_RCV_STATE 0x01000000 // Current receive state - READ ONLY 256#define AXGMAC_CFG1_RCV_STATE 0x01000000 // Current receive state - READ ONLY
257#define AXGMAC_CFG1_XOFF_SHORT 0x00001000 // Only pause for 64 slot on XOFF 257#define AXGMAC_CFG1_XOFF_SHORT 0x00001000 // Only pause for 64 slot on XOFF
@@ -262,24 +262,24 @@ typedef struct _SXG_HW_REGS {
262#define AXGMAC_CFG1_RCV_FCS2 0x00000200 // Delay receive FCS 2 4-byte words 262#define AXGMAC_CFG1_RCV_FCS2 0x00000200 // Delay receive FCS 2 4-byte words
263#define AXGMAC_CFG1_RCV_FCS3 0x00000300 // Delay receive FCS 3 4-byte words 263#define AXGMAC_CFG1_RCV_FCS3 0x00000300 // Delay receive FCS 3 4-byte words
264#define AXGMAC_CFG1_PKT_OVERRIDE 0x00000080 // Per-packet override enable 264#define AXGMAC_CFG1_PKT_OVERRIDE 0x00000080 // Per-packet override enable
265#define AXGMAC_CFG1_SWAP 0x00000040 // Byte swap enable 265#define AXGMAC_CFG1_SWAP 0x00000040 // Byte swap enable
266#define AXGMAC_CFG1_SHORT_ASSERT 0x00000020 // ASSERT srdrpfrm on short frame (<64) 266#define AXGMAC_CFG1_SHORT_ASSERT 0x00000020 // ASSERT srdrpfrm on short frame (<64)
267#define AXGMAC_CFG1_RCV_STRICT 0x00000010 // RCV only 802.3AE when CLEAR 267#define AXGMAC_CFG1_RCV_STRICT 0x00000010 // RCV only 802.3AE when CLEAR
268#define AXGMAC_CFG1_CHECK_LEN 0x00000008 // Verify frame length 268#define AXGMAC_CFG1_CHECK_LEN 0x00000008 // Verify frame length
269#define AXGMAC_CFG1_GEN_FCS 0x00000004 // Generate FCS 269#define AXGMAC_CFG1_GEN_FCS 0x00000004 // Generate FCS
270#define AXGMAC_CFG1_PAD_MASK 0x00000003 // Mask for pad bits 270#define AXGMAC_CFG1_PAD_MASK 0x00000003 // Mask for pad bits
271#define AXGMAC_CFG1_PAD_64 0x00000001 // Pad frames to 64 bytes 271#define AXGMAC_CFG1_PAD_64 0x00000001 // Pad frames to 64 bytes
272#define AXGMAC_CFG1_PAD_VLAN 0x00000002 // Detect VLAN and pad to 68 bytes 272#define AXGMAC_CFG1_PAD_VLAN 0x00000002 // Detect VLAN and pad to 68 bytes
273#define AXGMAC_CFG1_PAD_68 0x00000003 // Pad to 68 bytes 273#define AXGMAC_CFG1_PAD_68 0x00000003 // Pad to 68 bytes
274 274
275// A-XGMAC Configuration Register 2 275// A-XGMAC Configuration Register 2
276#define AXGMAC_CFG2_GEN_PAUSE 0x80000000 // Generate single pause frame (test) 276#define AXGMAC_CFG2_GEN_PAUSE 0x80000000 // Generate single pause frame (test)
277#define AXGMAC_CFG2_LF_MANUAL 0x08000000 // Manual link fault sequence 277#define AXGMAC_CFG2_LF_MANUAL 0x08000000 // Manual link fault sequence
278#define AXGMAC_CFG2_LF_AUTO 0x04000000 // Auto link fault sequence 278#define AXGMAC_CFG2_LF_AUTO 0x04000000 // Auto link fault sequence
279#define AXGMAC_CFG2_LF_REMOTE 0x02000000 // Remote link fault (READ ONLY) 279#define AXGMAC_CFG2_LF_REMOTE 0x02000000 // Remote link fault (READ ONLY)
280#define AXGMAC_CFG2_LF_LOCAL 0x01000000 // Local link fault (READ ONLY) 280#define AXGMAC_CFG2_LF_LOCAL 0x01000000 // Local link fault (READ ONLY)
281#define AXGMAC_CFG2_IPG_MASK 0x001F0000 // Inter packet gap 281#define AXGMAC_CFG2_IPG_MASK 0x001F0000 // Inter packet gap
282#define AXGMAC_CFG2_IPG_SHIFT 16 282#define AXGMAC_CFG2_IPG_SHIFT 16
283#define AXGMAC_CFG2_PAUSE_XMT 0x00008000 // Pause transmit module 283#define AXGMAC_CFG2_PAUSE_XMT 0x00008000 // Pause transmit module
284#define AXGMAC_CFG2_IPG_EXTEN 0x00000020 // Enable IPG extension algorithm 284#define AXGMAC_CFG2_IPG_EXTEN 0x00000020 // Enable IPG extension algorithm
285#define AXGMAC_CFG2_IPGEX_MASK 0x0000001F // IPG extension 285#define AXGMAC_CFG2_IPGEX_MASK 0x0000001F // IPG extension
@@ -299,9 +299,9 @@ typedef struct _SXG_HW_REGS {
299#define AXGMAC_SARHIGH_OCTET_SIX 0x00FF0000 // Sixth octet 299#define AXGMAC_SARHIGH_OCTET_SIX 0x00FF0000 // Sixth octet
300 300
301// A-XGMAC Maximum frame length register 301// A-XGMAC Maximum frame length register
302#define AXGMAC_MAXFRAME_XMT 0x3FFF0000 // Maximum transmit frame length 302#define AXGMAC_MAXFRAME_XMT 0x3FFF0000 // Maximum transmit frame length
303#define AXGMAC_MAXFRAME_XMT_SHIFT 16 303#define AXGMAC_MAXFRAME_XMT_SHIFT 16
304#define AXGMAC_MAXFRAME_RCV 0x0000FFFF // Maximum receive frame length 304#define AXGMAC_MAXFRAME_RCV 0x0000FFFF // Maximum receive frame length
305// This register doesn't need to be written for standard MTU. 305// This register doesn't need to be written for standard MTU.
306// For jumbo, I'll just statically define the value here. This 306// For jumbo, I'll just statically define the value here. This
307// value sets the receive byte count to 9036 (0x234C) and the 307// value sets the receive byte count to 9036 (0x234C) and the
@@ -324,34 +324,34 @@ typedef struct _SXG_HW_REGS {
324 324
325// A-XGMAC AMIIM Field Register 325// A-XGMAC AMIIM Field Register
326#define AXGMAC_AMIIM_FIELD_ST 0xC0000000 // 2-bit ST field 326#define AXGMAC_AMIIM_FIELD_ST 0xC0000000 // 2-bit ST field
327#define AXGMAC_AMIIM_FIELD_ST_SHIFT 30 327#define AXGMAC_AMIIM_FIELD_ST_SHIFT 30
328#define AXGMAC_AMIIM_FIELD_OP 0x30000000 // 2-bit OP field 328#define AXGMAC_AMIIM_FIELD_OP 0x30000000 // 2-bit OP field
329#define AXGMAC_AMIIM_FIELD_OP_SHIFT 28 329#define AXGMAC_AMIIM_FIELD_OP_SHIFT 28
330#define AXGMAC_AMIIM_FIELD_PORT_ADDR 0x0F800000 // Port address field (hstphyadx in spec) 330#define AXGMAC_AMIIM_FIELD_PORT_ADDR 0x0F800000 // Port address field (hstphyadx in spec)
331#define AXGMAC_AMIIM_FIELD_PORT_SHIFT 23 331#define AXGMAC_AMIIM_FIELD_PORT_SHIFT 23
332#define AXGMAC_AMIIM_FIELD_DEV_ADDR 0x007C0000 // Device address field (hstregadx in spec) 332#define AXGMAC_AMIIM_FIELD_DEV_ADDR 0x007C0000 // Device address field (hstregadx in spec)
333#define AXGMAC_AMIIM_FIELD_DEV_SHIFT 18 333#define AXGMAC_AMIIM_FIELD_DEV_SHIFT 18
334#define AXGMAC_AMIIM_FIELD_TA 0x00030000 // 2-bit TA field 334#define AXGMAC_AMIIM_FIELD_TA 0x00030000 // 2-bit TA field
335#define AXGMAC_AMIIM_FIELD_TA_SHIFT 16 335#define AXGMAC_AMIIM_FIELD_TA_SHIFT 16
336#define AXGMAC_AMIIM_FIELD_DATA 0x0000FFFF // Data field 336#define AXGMAC_AMIIM_FIELD_DATA 0x0000FFFF // Data field
337 337
338// Values for the AXGMAC_AMIIM_FIELD_OP field in the A-XGMAC AMIIM Field Register 338// Values for the AXGMAC_AMIIM_FIELD_OP field in the A-XGMAC AMIIM Field Register
339#define MIIM_OP_ADDR 0 // MIIM Address set operation 339#define MIIM_OP_ADDR 0 // MIIM Address set operation
340#define MIIM_OP_WRITE 1 // MIIM Write register operation 340#define MIIM_OP_WRITE 1 // MIIM Write register operation
341#define MIIM_OP_READ 2 // MIIM Read register operation 341#define MIIM_OP_READ 2 // MIIM Read register operation
342#define MIIM_OP_ADDR_SHIFT (MIIM_OP_ADDR << AXGMAC_AMIIM_FIELD_OP_SHIFT) 342#define MIIM_OP_ADDR_SHIFT (MIIM_OP_ADDR << AXGMAC_AMIIM_FIELD_OP_SHIFT)
343 343
344// Values for the AXGMAC_AMIIM_FIELD_PORT_ADDR field in the A-XGMAC AMIIM Field Register 344// Values for the AXGMAC_AMIIM_FIELD_PORT_ADDR field in the A-XGMAC AMIIM Field Register
345#define MIIM_PORT_NUM 1 // All Sahara MIIM modules use port 1 345#define MIIM_PORT_NUM 1 // All Sahara MIIM modules use port 1
346 346
347// Values for the AXGMAC_AMIIM_FIELD_DEV_ADDR field in the A-XGMAC AMIIM Field Register 347// Values for the AXGMAC_AMIIM_FIELD_DEV_ADDR field in the A-XGMAC AMIIM Field Register
348#define MIIM_DEV_PHY_PMA 1 // PHY PMA/PMD module MIIM device number 348#define MIIM_DEV_PHY_PMA 1 // PHY PMA/PMD module MIIM device number
349#define MIIM_DEV_PHY_PCS 3 // PHY PCS module MIIM device number 349#define MIIM_DEV_PHY_PCS 3 // PHY PCS module MIIM device number
350#define MIIM_DEV_PHY_XS 4 // PHY XS module MIIM device number 350#define MIIM_DEV_PHY_XS 4 // PHY XS module MIIM device number
351#define MIIM_DEV_XGXS 5 // XGXS MIIM device number 351#define MIIM_DEV_XGXS 5 // XGXS MIIM device number
352 352
353// Values for the AXGMAC_AMIIM_FIELD_TA field in the A-XGMAC AMIIM Field Register 353// Values for the AXGMAC_AMIIM_FIELD_TA field in the A-XGMAC AMIIM Field Register
354#define MIIM_TA_10GB 2 // set to 2 for 10 GB operation 354#define MIIM_TA_10GB 2 // set to 2 for 10 GB operation
355 355
356// A-XGMAC AMIIM Configuration Register 356// A-XGMAC AMIIM Configuration Register
357#define AXGMAC_AMIIM_CFG_NOPREAM 0x00000080 // Bypass preamble of mngmt frame 357#define AXGMAC_AMIIM_CFG_NOPREAM 0x00000080 // Bypass preamble of mngmt frame
@@ -365,25 +365,25 @@ typedef struct _SXG_HW_REGS {
365#define AXGMAC_AMIIM_INDC_BUSY 0x00000001 // Set until cmd operation complete 365#define AXGMAC_AMIIM_INDC_BUSY 0x00000001 // Set until cmd operation complete
366 366
367// Link Status and Control Register 367// Link Status and Control Register
368#define LS_PHY_CLR_RESET 0x80000000 // Clear reset signal to PHY 368#define LS_PHY_CLR_RESET 0x80000000 // Clear reset signal to PHY
369#define LS_SERDES_POWER_DOWN 0x40000000 // Power down the Sahara Serdes 369#define LS_SERDES_POWER_DOWN 0x40000000 // Power down the Sahara Serdes
370#define LS_XGXS_ENABLE 0x20000000 // Enable the XAUI XGXS logic 370#define LS_XGXS_ENABLE 0x20000000 // Enable the XAUI XGXS logic
371#define LS_XGXS_CTL 0x10000000 // Hold XAUI XGXS logic reset until Serdes is up 371#define LS_XGXS_CTL 0x10000000 // Hold XAUI XGXS logic reset until Serdes is up
372#define LS_SERDES_DOWN 0x08000000 // When 0, XAUI Serdes is up and initialization is complete 372#define LS_SERDES_DOWN 0x08000000 // When 0, XAUI Serdes is up and initialization is complete
373#define LS_TRACE_DOWN 0x04000000 // When 0, Trace Serdes is up and initialization is complete 373#define LS_TRACE_DOWN 0x04000000 // When 0, Trace Serdes is up and initialization is complete
374#define LS_PHY_CLK_25MHZ 0x02000000 // Set PHY clock to 25 MHz (else 156.125 MHz) 374#define LS_PHY_CLK_25MHZ 0x02000000 // Set PHY clock to 25 MHz (else 156.125 MHz)
375#define LS_PHY_CLK_EN 0x01000000 // Enable clock to PHY 375#define LS_PHY_CLK_EN 0x01000000 // Enable clock to PHY
376#define LS_XAUI_LINK_UP 0x00000010 // XAUI link is up 376#define LS_XAUI_LINK_UP 0x00000010 // XAUI link is up
377#define LS_XAUI_LINK_CHNG 0x00000008 // XAUI link status has changed 377#define LS_XAUI_LINK_CHNG 0x00000008 // XAUI link status has changed
378#define LS_LINK_ALARM 0x00000004 // Link alarm pin 378#define LS_LINK_ALARM 0x00000004 // Link alarm pin
379#define LS_ATTN_CTRL_MASK 0x00000003 // Mask link attention control bits 379#define LS_ATTN_CTRL_MASK 0x00000003 // Mask link attention control bits
380#define LS_ATTN_ALARM 0x00000000 // 00 => Attn on link alarm 380#define LS_ATTN_ALARM 0x00000000 // 00 => Attn on link alarm
381#define LS_ATTN_ALARM_OR_STAT_CHNG 0x00000001 // 01 => Attn on link alarm or status change 381#define LS_ATTN_ALARM_OR_STAT_CHNG 0x00000001 // 01 => Attn on link alarm or status change
382#define LS_ATTN_STAT_CHNG 0x00000002 // 10 => Attn on link status change 382#define LS_ATTN_STAT_CHNG 0x00000002 // 10 => Attn on link status change
383#define LS_ATTN_NONE 0x00000003 // 11 => no Attn 383#define LS_ATTN_NONE 0x00000003 // 11 => no Attn
384 384
385// Link Address High Registers 385// Link Address High Registers
386#define LINK_ADDR_ENABLE 0x80000000 // Enable this link address 386#define LINK_ADDR_ENABLE 0x80000000 // Enable this link address
387 387
388 388
389/*************************************************************************** 389/***************************************************************************
@@ -396,7 +396,7 @@ typedef struct _SXG_HW_REGS {
396#define XGXS_ADDRESS_STATUS1 0x0001 // XS Status 1 396#define XGXS_ADDRESS_STATUS1 0x0001 // XS Status 1
397#define XGXS_ADDRESS_DEVID_LOW 0x0002 // XS Device ID (low) 397#define XGXS_ADDRESS_DEVID_LOW 0x0002 // XS Device ID (low)
398#define XGXS_ADDRESS_DEVID_HIGH 0x0003 // XS Device ID (high) 398#define XGXS_ADDRESS_DEVID_HIGH 0x0003 // XS Device ID (high)
399#define XGXS_ADDRESS_SPEED 0x0004 // XS Speed ability 399#define XGXS_ADDRESS_SPEED 0x0004 // XS Speed ability
400#define XGXS_ADDRESS_DEV_LOW 0x0005 // XS Devices in package 400#define XGXS_ADDRESS_DEV_LOW 0x0005 // XS Devices in package
401#define XGXS_ADDRESS_DEV_HIGH 0x0006 // XS Devices in package 401#define XGXS_ADDRESS_DEV_HIGH 0x0006 // XS Devices in package
402#define XGXS_ADDRESS_STATUS2 0x0008 // XS Status 2 402#define XGXS_ADDRESS_STATUS2 0x0008 // XS Status 2
@@ -410,27 +410,27 @@ typedef struct _SXG_HW_REGS {
410#define XGXS_ADDRESS_RESET_HI2 0x8003 // Vendor-Specific Reset Hi 2 410#define XGXS_ADDRESS_RESET_HI2 0x8003 // Vendor-Specific Reset Hi 2
411 411
412// XS Control 1 register bit definitions 412// XS Control 1 register bit definitions
413#define XGXS_CONTROL1_RESET 0x8000 // Reset - self clearing 413#define XGXS_CONTROL1_RESET 0x8000 // Reset - self clearing
414#define XGXS_CONTROL1_LOOPBACK 0x4000 // Enable loopback 414#define XGXS_CONTROL1_LOOPBACK 0x4000 // Enable loopback
415#define XGXS_CONTROL1_SPEED1 0x2000 // 0 = unspecified, 1 = 10Gb+ 415#define XGXS_CONTROL1_SPEED1 0x2000 // 0 = unspecified, 1 = 10Gb+
416#define XGXS_CONTROL1_LOWPOWER 0x0400 // 1 = Low power mode 416#define XGXS_CONTROL1_LOWPOWER 0x0400 // 1 = Low power mode
417#define XGXS_CONTROL1_SPEED2 0x0040 // Same as SPEED1 (?) 417#define XGXS_CONTROL1_SPEED2 0x0040 // Same as SPEED1 (?)
418#define XGXS_CONTROL1_SPEED 0x003C // Everything reserved except zero (?) 418#define XGXS_CONTROL1_SPEED 0x003C // Everything reserved except zero (?)
419 419
420// XS Status 1 register bit definitions 420// XS Status 1 register bit definitions
421#define XGXS_STATUS1_FAULT 0x0080 // Fault detected 421#define XGXS_STATUS1_FAULT 0x0080 // Fault detected
422#define XGXS_STATUS1_LINK 0x0004 // 1 = Link up 422#define XGXS_STATUS1_LINK 0x0004 // 1 = Link up
423#define XGXS_STATUS1_LOWPOWER 0x0002 // 1 = Low power supported 423#define XGXS_STATUS1_LOWPOWER 0x0002 // 1 = Low power supported
424 424
425// XS Speed register bit definitions 425// XS Speed register bit definitions
426#define XGXS_SPEED_10G 0x0001 // 1 = 10G capable 426#define XGXS_SPEED_10G 0x0001 // 1 = 10G capable
427 427
428// XS Devices register bit definitions 428// XS Devices register bit definitions
429#define XGXS_DEVICES_DTE 0x0020 // DTE XS Present 429#define XGXS_DEVICES_DTE 0x0020 // DTE XS Present
430#define XGXS_DEVICES_PHY 0x0010 // PHY XS Present 430#define XGXS_DEVICES_PHY 0x0010 // PHY XS Present
431#define XGXS_DEVICES_PCS 0x0008 // PCS Present 431#define XGXS_DEVICES_PCS 0x0008 // PCS Present
432#define XGXS_DEVICES_WIS 0x0004 // WIS Present 432#define XGXS_DEVICES_WIS 0x0004 // WIS Present
433#define XGXS_DEVICES_PMD 0x0002 // PMD/PMA Present 433#define XGXS_DEVICES_PMD 0x0002 // PMD/PMA Present
434#define XGXS_DEVICES_CLAUSE22 0x0001 // Clause 22 registers present 434#define XGXS_DEVICES_CLAUSE22 0x0001 // Clause 22 registers present
435 435
436// XS Devices High register bit definitions 436// XS Devices High register bit definitions
@@ -444,18 +444,18 @@ typedef struct _SXG_HW_REGS {
444#define XGXS_STATUS2_RCV_FAULT 0x0400 // Receive fault 444#define XGXS_STATUS2_RCV_FAULT 0x0400 // Receive fault
445 445
446// XS Package ID High register bit definitions 446// XS Package ID High register bit definitions
447#define XGXS_PKGID_HIGH_ORG 0xFC00 // Organizationally Unique 447#define XGXS_PKGID_HIGH_ORG 0xFC00 // Organizationally Unique
448#define XGXS_PKGID_HIGH_MFG 0x03F0 // Manufacturer Model 448#define XGXS_PKGID_HIGH_MFG 0x03F0 // Manufacturer Model
449#define XGXS_PKGID_HIGH_REV 0x000F // Revision Number 449#define XGXS_PKGID_HIGH_REV 0x000F // Revision Number
450 450
451// XS Lane Status register bit definitions 451// XS Lane Status register bit definitions
452#define XGXS_LANE_PHY 0x1000 // PHY/DTE lane alignment status 452#define XGXS_LANE_PHY 0x1000 // PHY/DTE lane alignment status
453#define XGXS_LANE_PATTERN 0x0800 // Pattern testing ability 453#define XGXS_LANE_PATTERN 0x0800 // Pattern testing ability
454#define XGXS_LANE_LOOPBACK 0x0400 // PHY loopback ability 454#define XGXS_LANE_LOOPBACK 0x0400 // PHY loopback ability
455#define XGXS_LANE_SYNC3 0x0008 // Lane 3 sync 455#define XGXS_LANE_SYNC3 0x0008 // Lane 3 sync
456#define XGXS_LANE_SYNC2 0x0004 // Lane 2 sync 456#define XGXS_LANE_SYNC2 0x0004 // Lane 2 sync
457#define XGXS_LANE_SYNC1 0x0002 // Lane 1 sync 457#define XGXS_LANE_SYNC1 0x0002 // Lane 1 sync
458#define XGXS_LANE_SYNC0 0x0001 // Lane 0 sync 458#define XGXS_LANE_SYNC0 0x0001 // Lane 0 sync
459 459
460// XS Test Control register bit definitions 460// XS Test Control register bit definitions
461#define XGXS_TEST_PATTERN_ENABLE 0x0004 // Test pattern enabled 461#define XGXS_TEST_PATTERN_ENABLE 0x0004 // Test pattern enabled
@@ -473,10 +473,10 @@ typedef struct _SXG_HW_REGS {
473// LASI (Link Alarm Status Interrupt) Registers (located in MIIM_DEV_PHY_PMA device) 473// LASI (Link Alarm Status Interrupt) Registers (located in MIIM_DEV_PHY_PMA device)
474#define LASI_RX_ALARM_CONTROL 0x9000 // LASI RX_ALARM Control 474#define LASI_RX_ALARM_CONTROL 0x9000 // LASI RX_ALARM Control
475#define LASI_TX_ALARM_CONTROL 0x9001 // LASI TX_ALARM Control 475#define LASI_TX_ALARM_CONTROL 0x9001 // LASI TX_ALARM Control
476#define LASI_CONTROL 0x9002 // LASI Control 476#define LASI_CONTROL 0x9002 // LASI Control
477#define LASI_RX_ALARM_STATUS 0x9003 // LASI RX_ALARM Status 477#define LASI_RX_ALARM_STATUS 0x9003 // LASI RX_ALARM Status
478#define LASI_TX_ALARM_STATUS 0x9004 // LASI TX_ALARM Status 478#define LASI_TX_ALARM_STATUS 0x9004 // LASI TX_ALARM Status
479#define LASI_STATUS 0x9005 // LASI Status 479#define LASI_STATUS 0x9005 // LASI Status
480 480
481// LASI_CONTROL bit definitions 481// LASI_CONTROL bit definitions
482#define LASI_CTL_RX_ALARM_ENABLE 0x0004 // Enable RX_ALARM interrupts 482#define LASI_CTL_RX_ALARM_ENABLE 0x0004 // Enable RX_ALARM interrupts
@@ -489,34 +489,34 @@ typedef struct _SXG_HW_REGS {
489#define LASI_STATUS_LS_ALARM 0x0001 // Link Status 489#define LASI_STATUS_LS_ALARM 0x0001 // Link Status
490 490
491// PHY registers - PMA/PMD (device 1) 491// PHY registers - PMA/PMD (device 1)
492#define PHY_PMA_CONTROL1 0x0000 // PMA/PMD Control 1 492#define PHY_PMA_CONTROL1 0x0000 // PMA/PMD Control 1
493#define PHY_PMA_STATUS1 0x0001 // PMA/PMD Status 1 493#define PHY_PMA_STATUS1 0x0001 // PMA/PMD Status 1
494#define PHY_PMA_RCV_DET 0x000A // PMA/PMD Receive Signal Detect 494#define PHY_PMA_RCV_DET 0x000A // PMA/PMD Receive Signal Detect
495 // other PMA/PMD registers exist and can be defined as needed 495 // other PMA/PMD registers exist and can be defined as needed
496 496
497// PHY registers - PCS (device 3) 497// PHY registers - PCS (device 3)
498#define PHY_PCS_CONTROL1 0x0000 // PCS Control 1 498#define PHY_PCS_CONTROL1 0x0000 // PCS Control 1
499#define PHY_PCS_STATUS1 0x0001 // PCS Status 1 499#define PHY_PCS_STATUS1 0x0001 // PCS Status 1
500#define PHY_PCS_10G_STATUS1 0x0020 // PCS 10GBASE-R Status 1 500#define PHY_PCS_10G_STATUS1 0x0020 // PCS 10GBASE-R Status 1
501 // other PCS registers exist and can be defined as needed 501 // other PCS registers exist and can be defined as needed
502 502
503// PHY registers - XS (device 4) 503// PHY registers - XS (device 4)
504#define PHY_XS_CONTROL1 0x0000 // XS Control 1 504#define PHY_XS_CONTROL1 0x0000 // XS Control 1
505#define PHY_XS_STATUS1 0x0001 // XS Status 1 505#define PHY_XS_STATUS1 0x0001 // XS Status 1
506#define PHY_XS_LANE_STATUS 0x0018 // XS Lane Status 506#define PHY_XS_LANE_STATUS 0x0018 // XS Lane Status
507 // other XS registers exist and can be defined as needed 507 // other XS registers exist and can be defined as needed
508 508
509// PHY_PMA_CONTROL1 register bit definitions 509// PHY_PMA_CONTROL1 register bit definitions
510#define PMA_CONTROL1_RESET 0x8000 // PMA/PMD reset 510#define PMA_CONTROL1_RESET 0x8000 // PMA/PMD reset
511 511
512// PHY_PMA_RCV_DET register bit definitions 512// PHY_PMA_RCV_DET register bit definitions
513#define PMA_RCV_DETECT 0x0001 // PMA/PMD receive signal detect 513#define PMA_RCV_DETECT 0x0001 // PMA/PMD receive signal detect
514 514
515// PHY_PCS_10G_STATUS1 register bit definitions 515// PHY_PCS_10G_STATUS1 register bit definitions
516#define PCS_10B_BLOCK_LOCK 0x0001 // PCS 10GBASE-R locked to receive blocks 516#define PCS_10B_BLOCK_LOCK 0x0001 // PCS 10GBASE-R locked to receive blocks
517 517
518// PHY_XS_LANE_STATUS register bit definitions 518// PHY_XS_LANE_STATUS register bit definitions
519#define XS_LANE_ALIGN 0x1000 // XS transmit lanes aligned 519#define XS_LANE_ALIGN 0x1000 // XS transmit lanes aligned
520 520
521// PHY Microcode download data structure 521// PHY Microcode download data structure
522typedef struct _PHY_UCODE { 522typedef struct _PHY_UCODE {
@@ -558,8 +558,8 @@ typedef struct _XMT_DESC {
558 // command codes 558 // command codes
559#define XMT_DESC_CMD_RAW_SEND 0 // raw send descriptor 559#define XMT_DESC_CMD_RAW_SEND 0 // raw send descriptor
560#define XMT_DESC_CMD_CSUM_INSERT 1 // checksum insert descriptor 560#define XMT_DESC_CMD_CSUM_INSERT 1 // checksum insert descriptor
561#define XMT_DESC_CMD_FORMAT 2 // format descriptor 561#define XMT_DESC_CMD_FORMAT 2 // format descriptor
562#define XMT_DESC_CMD_PRIME 3 // prime descriptor 562#define XMT_DESC_CMD_PRIME 3 // prime descriptor
563#define XMT_DESC_CMD_CODE_SHFT 6 // comand code shift (shift to bits [31:30] in word 0) 563#define XMT_DESC_CMD_CODE_SHFT 6 // comand code shift (shift to bits [31:30] in word 0)
564 // shifted command codes 564 // shifted command codes
565#define XMT_RAW_SEND (XMT_DESC_CMD_RAW_SEND << XMT_DESC_CMD_CODE_SHFT) 565#define XMT_RAW_SEND (XMT_DESC_CMD_RAW_SEND << XMT_DESC_CMD_CODE_SHFT)
@@ -569,22 +569,22 @@ typedef struct _XMT_DESC {
569 569
570// XMT_DESC Control Byte (XmtCtl) definitions 570// XMT_DESC Control Byte (XmtCtl) definitions
571// NOTE: These bits do not work on Sahara (Rev A)! 571// NOTE: These bits do not work on Sahara (Rev A)!
572#define XMT_CTL_PAUSE_FRAME 0x80 // current frame is a pause control frame (for statistics) 572#define XMT_CTL_PAUSE_FRAME 0x80 // current frame is a pause control frame (for statistics)
573#define XMT_CTL_CONTROL_FRAME 0x40 // current frame is a control frame (for statistics) 573#define XMT_CTL_CONTROL_FRAME 0x40 // current frame is a control frame (for statistics)
574#define XMT_CTL_PER_PKT_QUAL 0x20 // per packet qualifier 574#define XMT_CTL_PER_PKT_QUAL 0x20 // per packet qualifier
575#define XMT_CTL_PAD_MODE_NONE 0x00 // do not pad frame 575#define XMT_CTL_PAD_MODE_NONE 0x00 // do not pad frame
576#define XMT_CTL_PAD_MODE_64 0x08 // pad frame to 64 bytes 576#define XMT_CTL_PAD_MODE_64 0x08 // pad frame to 64 bytes
577#define XMT_CTL_PAD_MODE_VLAN_68 0x10 // pad frame to 64 bytes, and VLAN frames to 68 bytes 577#define XMT_CTL_PAD_MODE_VLAN_68 0x10 // pad frame to 64 bytes, and VLAN frames to 68 bytes
578#define XMT_CTL_PAD_MODE_68 0x18 // pad frame to 68 bytes 578#define XMT_CTL_PAD_MODE_68 0x18 // pad frame to 68 bytes
579#define XMT_CTL_GEN_FCS 0x04 // generate FCS (CRC) for this frame 579#define XMT_CTL_GEN_FCS 0x04 // generate FCS (CRC) for this frame
580#define XMT_CTL_DELAY_FCS_0 0x00 // do not delay FCS calcution 580#define XMT_CTL_DELAY_FCS_0 0x00 // do not delay FCS calcution
581#define XMT_CTL_DELAY_FCS_1 0x01 // delay FCS calculation by 1 (4-byte) word 581#define XMT_CTL_DELAY_FCS_1 0x01 // delay FCS calculation by 1 (4-byte) word
582#define XMT_CTL_DELAY_FCS_2 0x02 // delay FCS calculation by 2 (4-byte) words 582#define XMT_CTL_DELAY_FCS_2 0x02 // delay FCS calculation by 2 (4-byte) words
583#define XMT_CTL_DELAY_FCS_3 0x03 // delay FCS calculation by 3 (4-byte) words 583#define XMT_CTL_DELAY_FCS_3 0x03 // delay FCS calculation by 3 (4-byte) words
584 584
585// XMT_DESC XmtBufId definition 585// XMT_DESC XmtBufId definition
586#define XMT_BUF_ID_SHFT 8 // The Xmt buffer ID is formed by dividing 586#define XMT_BUF_ID_SHFT 8 // The Xmt buffer ID is formed by dividing
587 // the buffer (DRAM) address by 256 (or << 8) 587 // the buffer (DRAM) address by 256 (or << 8)
588 588
589/***************************************************************************** 589/*****************************************************************************
590 * Receiver Sequencer Definitions 590 * Receiver Sequencer Definitions
@@ -594,8 +594,8 @@ typedef struct _XMT_DESC {
594#define RCV_EVTQ_RBFID_MASK 0x0000FFFF // bit mask for the Receive Buffer ID 594#define RCV_EVTQ_RBFID_MASK 0x0000FFFF // bit mask for the Receive Buffer ID
595 595
596// Receive Buffer ID definition 596// Receive Buffer ID definition
597#define RCV_BUF_ID_SHFT 5 // The Rcv buffer ID is formed by dividing 597#define RCV_BUF_ID_SHFT 5 // The Rcv buffer ID is formed by dividing
598 // the buffer (DRAM) address by 32 (or << 5) 598 // the buffer (DRAM) address by 32 (or << 5)
599 599
600// Format of the 18 byte Receive Buffer returned by the 600// Format of the 18 byte Receive Buffer returned by the
601// Receive Sequencer for received packets 601// Receive Sequencer for received packets
@@ -623,48 +623,48 @@ typedef struct _RCV_BUF_HDR {
623 * Queue definitions 623 * Queue definitions
624 *****************************************************************************/ 624 *****************************************************************************/
625 625
626// Ingress (read only) queue numbers 626/* Ingress (read only) queue numbers */
627#define PXY_BUF_Q 0 // Proxy Buffer Queue 627#define PXY_BUF_Q 0 /* Proxy Buffer Queue */
628#define HST_EVT_Q 1 // Host Event Queue 628#define HST_EVT_Q 1 /* Host Event Queue */
629#define XMT_BUF_Q 2 // Transmit Buffer Queue 629#define XMT_BUF_Q 2 /* Transmit Buffer Queue */
630#define SKT_EVL_Q 3 // RcvSqr Socket Event Low Priority Queue 630#define SKT_EVL_Q 3 /* RcvSqr Socket Event Low Priority Queue */
631#define RCV_EVL_Q 4 // RcvSqr Rcv Event Low Priority Queue 631#define RCV_EVL_Q 4 /* RcvSqr Rcv Event Low Priority Queue */
632#define SKT_EVH_Q 5 // RcvSqr Socket Event High Priority Queue 632#define SKT_EVH_Q 5 /* RcvSqr Socket Event High Priority Queue */
633#define RCV_EVH_Q 6 // RcvSqr Rcv Event High Priority Queue 633#define RCV_EVH_Q 6 /* RcvSqr Rcv Event High Priority Queue */
634#define DMA_RSP_Q 7 // Dma Response Queue - one per CPU context 634#define DMA_RSP_Q 7 /* Dma Response Queue - one per CPU context */
635// Local (read/write) queue numbers 635/* Local (read/write) queue numbers */
636#define LOCAL_A_Q 8 // Spare local Queue 636#define LOCAL_A_Q 8 /* Spare local Queue */
637#define LOCAL_B_Q 9 // Spare local Queue 637#define LOCAL_B_Q 9 /* Spare local Queue */
638#define LOCAL_C_Q 10 // Spare local Queue 638#define LOCAL_C_Q 10 /* Spare local Queue */
639#define FSM_EVT_Q 11 // Finite-State-Machine Event Queue 639#define FSM_EVT_Q 11 /* Finite-State-Machine Event Queue */
640#define SBF_PAL_Q 12 // System Buffer Physical Address (low) Queue 640#define SBF_PAL_Q 12 /* System Buffer Physical Address (low) Queue */
641#define SBF_PAH_Q 13 // System Buffer Physical Address (high) Queue 641#define SBF_PAH_Q 13 /* System Buffer Physical Address (high) Queue */
642#define SBF_VAL_Q 14 // System Buffer Virtual Address (low) Queue 642#define SBF_VAL_Q 14 /* System Buffer Virtual Address (low) Queue */
643#define SBF_VAH_Q 15 // System Buffer Virtual Address (high) Queue 643#define SBF_VAH_Q 15 /* System Buffer Virtual Address (high) Queue */
644// Egress (write only) queue numbers 644/* Egress (write only) queue numbers */
645#define H2G_CMD_Q 16 // Host to GlbRam DMA Command Queue 645#define H2G_CMD_Q 16 /* Host to GlbRam DMA Command Queue */
646#define H2D_CMD_Q 17 // Host to DRAM DMA Command Queue 646#define H2D_CMD_Q 17 /* Host to DRAM DMA Command Queue */
647#define G2H_CMD_Q 18 // GlbRam to Host DMA Command Queue 647#define G2H_CMD_Q 18 /* GlbRam to Host DMA Command Queue */
648#define G2D_CMD_Q 19 // GlbRam to DRAM DMA Command Queue 648#define G2D_CMD_Q 19 /* GlbRam to DRAM DMA Command Queue */
649#define D2H_CMD_Q 20 // DRAM to Host DMA Command Queue 649#define D2H_CMD_Q 20 /* DRAM to Host DMA Command Queue */
650#define D2G_CMD_Q 21 // DRAM to GlbRam DMA Command Queue 650#define D2G_CMD_Q 21 /* DRAM to GlbRam DMA Command Queue */
651#define D2D_CMD_Q 22 // DRAM to DRAM DMA Command Queue 651#define D2D_CMD_Q 22 /* DRAM to DRAM DMA Command Queue */
652#define PXL_CMD_Q 23 // Low Priority Proxy Command Queue 652#define PXL_CMD_Q 23 /* Low Priority Proxy Command Queue */
653#define PXH_CMD_Q 24 // High Priority Proxy Command Queue 653#define PXH_CMD_Q 24 /* High Priority Proxy Command Queue */
654#define RSQ_CMD_Q 25 // Receive Sequencer Command Queue 654#define RSQ_CMD_Q 25 /* Receive Sequencer Command Queue */
655#define RCV_BUF_Q 26 // Receive Buffer Queue 655#define RCV_BUF_Q 26 /* Receive Buffer Queue */
656 656
657// Bit definitions for the Proxy Command queues (PXL_CMD_Q and PXH_CMD_Q) 657/* Bit definitions for the Proxy Command queues (PXL_CMD_Q and PXH_CMD_Q) */
658#define PXY_COPY_EN 0x00200000 // enable copy of xmt descriptor to xmt command queue 658#define PXY_COPY_EN 0x00200000 /* enable copy of xmt descriptor to xmt command queue */
659#define PXY_SIZE_16 0x00000000 // copy 16 bytes 659#define PXY_SIZE_16 0x00000000 /* copy 16 bytes */
660#define PXY_SIZE_32 0x00100000 // copy 32 bytes 660#define PXY_SIZE_32 0x00100000 /* copy 32 bytes */
661 661
662/***************************************************************************** 662/*****************************************************************************
663 * SXG EEPROM/Flash Configuration Definitions 663 * SXG EEPROM/Flash Configuration Definitions
664 *****************************************************************************/ 664 *****************************************************************************/
665#pragma pack(push, 1) 665#pragma pack(push, 1)
666 666
667// 667/* */
668typedef struct _HW_CFG_DATA { 668typedef struct _HW_CFG_DATA {
669 ushort Addr; 669 ushort Addr;
670 union { 670 union {
@@ -673,22 +673,22 @@ typedef struct _HW_CFG_DATA {
673 }; 673 };
674} HW_CFG_DATA, *PHW_CFG_DATA; 674} HW_CFG_DATA, *PHW_CFG_DATA;
675 675
676// 676/* */
677#define NUM_HW_CFG_ENTRIES ((128/sizeof(HW_CFG_DATA)) - 4) 677#define NUM_HW_CFG_ENTRIES ((128/sizeof(HW_CFG_DATA)) - 4)
678 678
679// MAC address 679/* MAC address */
680typedef struct _SXG_CONFIG_MAC { 680typedef struct _SXG_CONFIG_MAC {
681 unsigned char MacAddr[6]; // MAC Address 681 unsigned char MacAddr[6]; /* MAC Address */
682} SXG_CONFIG_MAC, *PSXG_CONFIG_MAC; 682} SXG_CONFIG_MAC, *PSXG_CONFIG_MAC;
683 683
684// 684/* */
685typedef struct _ATK_FRU { 685typedef struct _ATK_FRU {
686 unsigned char PartNum[6]; 686 unsigned char PartNum[6];
687 unsigned char Revision[2]; 687 unsigned char Revision[2];
688 unsigned char Serial[14]; 688 unsigned char Serial[14];
689} ATK_FRU, *PATK_FRU; 689} ATK_FRU, *PATK_FRU;
690 690
691// OEM FRU Format types 691/* OEM FRU Format types */
692#define ATK_FRU_FORMAT 0x0000 692#define ATK_FRU_FORMAT 0x0000
693#define CPQ_FRU_FORMAT 0x0001 693#define CPQ_FRU_FORMAT 0x0001
694#define DELL_FRU_FORMAT 0x0002 694#define DELL_FRU_FORMAT 0x0002
@@ -697,24 +697,24 @@ typedef struct _ATK_FRU {
697#define EMC_FRU_FORMAT 0x0005 697#define EMC_FRU_FORMAT 0x0005
698#define NO_FRU_FORMAT 0xFFFF 698#define NO_FRU_FORMAT 0xFFFF
699 699
700// EEPROM/Flash Format 700/* EEPROM/Flash Format */
701typedef struct _SXG_CONFIG { 701typedef struct _SXG_CONFIG {
702 // 702 /* */
703 // Section 1 (128 bytes) 703 /* Section 1 (128 bytes) */
704 // 704 /* */
705 ushort MagicWord; // EEPROM/FLASH Magic code 'A5A5' 705 ushort MagicWord; /* EEPROM/FLASH Magic code 'A5A5' */
706 ushort SpiClks; // SPI bus clock dividers 706 ushort SpiClks; /* SPI bus clock dividers */
707 HW_CFG_DATA HwCfg[NUM_HW_CFG_ENTRIES]; 707 HW_CFG_DATA HwCfg[NUM_HW_CFG_ENTRIES];
708 // 708 /* */
709 // 709 /* */
710 // 710 /* */
711 ushort Version; // EEPROM format version 711 ushort Version; /* EEPROM format version */
712 SXG_CONFIG_MAC MacAddr[4]; // space for 4 MAC addresses 712 SXG_CONFIG_MAC MacAddr[4]; /* space for 4 MAC addresses */
713 ATK_FRU AtkFru; // FRU information 713 ATK_FRU AtkFru; /* FRU information */
714 ushort OemFruFormat; // OEM FRU format type 714 ushort OemFruFormat; /* OEM FRU format type */
715 unsigned char OemFru[76]; // OEM FRU information (optional) 715 unsigned char OemFru[76]; /* OEM FRU information (optional) */
716 ushort Checksum; // Checksum of section 2 716 ushort Checksum; /* Checksum of section 2 */
717 // CS info XXXTODO 717 /* CS info XXXTODO */
718} SXG_CONFIG, *PSXG_CONFIG; 718} SXG_CONFIG, *PSXG_CONFIG;
719#pragma pack(pop) 719#pragma pack(pop)
720 720
@@ -723,12 +723,12 @@ typedef struct _SXG_CONFIG {
723 *****************************************************************************/ 723 *****************************************************************************/
724 724
725// Sahara (ASIC level) defines 725// Sahara (ASIC level) defines
726#define SAHARA_GRAM_SIZE 0x020000 // GRAM size - 128 KB 726#define SAHARA_GRAM_SIZE 0x020000 // GRAM size - 128 KB
727#define SAHARA_DRAM_SIZE 0x200000 // DRAM size - 2 MB 727#define SAHARA_DRAM_SIZE 0x200000 // DRAM size - 2 MB
728#define SAHARA_QRAM_SIZE 0x004000 // QRAM size - 16K entries (64 KB) 728#define SAHARA_QRAM_SIZE 0x004000 // QRAM size - 16K entries (64 KB)
729#define SAHARA_WCS_SIZE 0x002000 // WCS - 8K instructions (x 108 bits) 729#define SAHARA_WCS_SIZE 0x002000 // WCS - 8K instructions (x 108 bits)
730 730
731// Arabia (board level) defines 731// Arabia (board level) defines
732#define FLASH_SIZE 0x080000 // 512 KB (4 Mb) 732#define FLASH_SIZE 0x080000 // 512 KB (4 Mb)
733#define EEPROM_SIZE_XFMR 512 // true EEPROM size (bytes), including xfmr area 733#define EEPROM_SIZE_XFMR 512 // true EEPROM size (bytes), including xfmr area
734#define EEPROM_SIZE_NO_XFMR 256 // EEPROM size excluding xfmr area 734#define EEPROM_SIZE_NO_XFMR 256 // EEPROM size excluding xfmr area
diff --git a/drivers/staging/sxg/sxgphycode.h b/drivers/staging/sxg/sxgphycode.h
index 26b36c81eb1a..8dbaeda7eca4 100644
--- a/drivers/staging/sxg/sxgphycode.h
+++ b/drivers/staging/sxg/sxgphycode.h
@@ -34,7 +34,7 @@ static PHY_UCODE PhyUcode[] = {
34 */ 34 */
35 /* Addr, Data */ 35 /* Addr, Data */
36 {0xc017, 0xfeb0}, /* flip RX_LOS polarity (mandatory */ 36 {0xc017, 0xfeb0}, /* flip RX_LOS polarity (mandatory */
37 /* patch for SFP+ applications) */ 37 /* patch for SFP+ applications) */
38 {0xC001, 0x0428}, /* flip RX serial polarity */ 38 {0xC001, 0x0428}, /* flip RX serial polarity */
39 39
40 {0xc013, 0xf341}, /* invert lxmit clock (mandatory patch) */ 40 {0xc013, 0xf341}, /* invert lxmit clock (mandatory patch) */
@@ -43,7 +43,7 @@ static PHY_UCODE PhyUcode[] = {
43 {0xc210, 0x8000}, /* reset datapath (mandatory patch) */ 43 {0xc210, 0x8000}, /* reset datapath (mandatory patch) */
44 {0xc210, 0x0000}, /* reset datapath (mandatory patch) */ 44 {0xc210, 0x0000}, /* reset datapath (mandatory patch) */
45 {0x0000, 0x0032}, /* wait for 50ms for datapath reset to */ 45 {0x0000, 0x0032}, /* wait for 50ms for datapath reset to */
46 /* complete. (mandatory patch) */ 46 /* complete. (mandatory patch) */
47 47
48 /* Configure the LED's */ 48 /* Configure the LED's */
49 {0xc214, 0x0099}, /* configure the LED drivers */ 49 {0xc214, 0x0099}, /* configure the LED drivers */
@@ -52,15 +52,15 @@ static PHY_UCODE PhyUcode[] = {
52 52
53 /* Transceiver-specific MDIO Patches: */ 53 /* Transceiver-specific MDIO Patches: */
54 {0xc010, 0x448a}, /* (bit 14) mask out high BER input from the */ 54 {0xc010, 0x448a}, /* (bit 14) mask out high BER input from the */
55 /* LOS signal in 1.000A */ 55 /* LOS signal in 1.000A */
56 /* (mandatory patch for SR code)*/ 56 /* (mandatory patch for SR code) */
57 {0xc003, 0x0181}, /* (bit 7) enable the CDR inc setting in */ 57 {0xc003, 0x0181}, /* (bit 7) enable the CDR inc setting in */
58 /* 1.C005 (mandatory patch for SR code) */ 58 /* 1.C005 (mandatory patch for SR code) */
59 59
60 /* Transceiver-specific Microcontroller Initialization: */ 60 /* Transceiver-specific Microcontroller Initialization: */
61 {0xc04a, 0x5200}, /* activate microcontroller and pause */ 61 {0xc04a, 0x5200}, /* activate microcontroller and pause */
62 {0x0000, 0x0032}, /* wait 50ms for microcontroller before */ 62 {0x0000, 0x0032}, /* wait 50ms for microcontroller before */
63 /* writing in code. */ 63 /* writing in code. */
64 64
65 /* code block starts here: */ 65 /* code block starts here: */
66 {0xcc00, 0x2009}, 66 {0xcc00, 0x2009},
diff --git a/drivers/staging/usbip/usbip_common.c b/drivers/staging/usbip/usbip_common.c
index e64918f42ff7..72e209276ea7 100644
--- a/drivers/staging/usbip/usbip_common.c
+++ b/drivers/staging/usbip/usbip_common.c
@@ -221,7 +221,7 @@ static void usbip_dump_request_type(__u8 rt)
221static void usbip_dump_usb_ctrlrequest(struct usb_ctrlrequest *cmd) 221static void usbip_dump_usb_ctrlrequest(struct usb_ctrlrequest *cmd)
222{ 222{
223 if (!cmd) { 223 if (!cmd) {
224 printk(" %s : null pointer\n", __FUNCTION__); 224 printk(" %s : null pointer\n", __func__);
225 return; 225 return;
226 } 226 }
227 227
diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
index 933ccaf50afb..58e3995d0e2c 100644
--- a/drivers/staging/usbip/vhci_rx.c
+++ b/drivers/staging/usbip/vhci_rx.c
@@ -202,7 +202,7 @@ static void vhci_rx_pdu(struct usbip_device *ud)
202 ret = usbip_xmit(0, ud->tcp_socket, (char *) &pdu, sizeof(pdu), 0); 202 ret = usbip_xmit(0, ud->tcp_socket, (char *) &pdu, sizeof(pdu), 0);
203 if (ret != sizeof(pdu)) { 203 if (ret != sizeof(pdu)) {
204 uerr("receiving pdu failed! size is %d, should be %d\n", 204 uerr("receiving pdu failed! size is %d, should be %d\n",
205 ret, sizeof(pdu)); 205 ret, (unsigned int)sizeof(pdu));
206 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP); 206 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
207 return; 207 return;
208 } 208 }
diff --git a/drivers/staging/winbond/Kconfig b/drivers/staging/winbond/Kconfig
index 10d72bec88a9..425219ed7ab9 100644
--- a/drivers/staging/winbond/Kconfig
+++ b/drivers/staging/winbond/Kconfig
@@ -1,6 +1,6 @@
1config W35UND 1config W35UND
2 tristate "Winbond driver" 2 tristate "Winbond driver"
3 depends on MAC80211 && WLAN_80211 && EXPERIMENTAL && !4KSTACKS 3 depends on MAC80211 && WLAN_80211 && USB && EXPERIMENTAL && !4KSTACKS
4 default n 4 default n
5 ---help--- 5 ---help---
6 This is highly experimental driver for winbond wifi card on some Kohjinsha notebooks 6 This is highly experimental driver for winbond wifi card on some Kohjinsha notebooks
diff --git a/drivers/staging/winbond/README b/drivers/staging/winbond/README
index 707b6b354dc5..cb944e4bf174 100644
--- a/drivers/staging/winbond/README
+++ b/drivers/staging/winbond/README
@@ -5,6 +5,7 @@ TODO:
5 - remove typedefs 5 - remove typedefs
6 - remove unused ioctls 6 - remove unused ioctls
7 - use cfg80211 for regulatory stuff 7 - use cfg80211 for regulatory stuff
8 - fix 4k stack problems
8 9
9Please send patches to Greg Kroah-Hartman <greg@kroah.com> and 10Please send patches to Greg Kroah-Hartman <greg@kroah.com> and
10Pavel Machek <pavel@suse.cz> 11Pavel Machek <pavel@suse.cz>
diff --git a/drivers/staging/winbond/bss_f.h b/drivers/staging/winbond/bss_f.h
index c957bc94f08d..013183153993 100644
--- a/drivers/staging/winbond/bss_f.h
+++ b/drivers/staging/winbond/bss_f.h
@@ -24,7 +24,7 @@ void DesiredRate2InfoElement(PWB32_ADAPTER Adapter, u8 *addr, u16 *iFildOffset,
24 u8 *pBasicRateSet, u8 BasicRateCount, 24 u8 *pBasicRateSet, u8 BasicRateCount,
25 u8 *pOperationRateSet, u8 OperationRateCount); 25 u8 *pOperationRateSet, u8 OperationRateCount);
26void BSSAddIBSSdata(PWB32_ADAPTER Adapter, PWB_BSSDESCRIPTION psDesData); 26void BSSAddIBSSdata(PWB32_ADAPTER Adapter, PWB_BSSDESCRIPTION psDesData);
27unsigned char boCmpMacAddr( PUCHAR, PUCHAR ); 27unsigned char boCmpMacAddr( u8 *, u8 *);
28unsigned char boCmpSSID(struct SSID_Element *psSSID1, struct SSID_Element *psSSID2); 28unsigned char boCmpSSID(struct SSID_Element *psSSID1, struct SSID_Element *psSSID2);
29u16 wBSSfindSSID(PWB32_ADAPTER Adapter, struct SSID_Element *psSsid); 29u16 wBSSfindSSID(PWB32_ADAPTER Adapter, struct SSID_Element *psSsid);
30u16 wRoamingQuery(PWB32_ADAPTER Adapter); 30u16 wRoamingQuery(PWB32_ADAPTER Adapter);
@@ -42,11 +42,11 @@ void RateReSortForSRate(PWB32_ADAPTER Adapter, u8 *RateArray, u8 num);
42void Assemble_IE(PWB32_ADAPTER Adapter, u16 wBssIdx); 42void Assemble_IE(PWB32_ADAPTER Adapter, u16 wBssIdx);
43void SetMaxTxRate(PWB32_ADAPTER Adapter); 43void SetMaxTxRate(PWB32_ADAPTER Adapter);
44 44
45void CreateWpaIE(PWB32_ADAPTER Adapter, u16* iFildOffset, PUCHAR msg, struct Management_Frame* msgHeader, 45void CreateWpaIE(PWB32_ADAPTER Adapter, u16* iFildOffset, u8 *msg, struct Management_Frame* msgHeader,
46 struct Association_Request_Frame_Body* msgBody, u16 iMSindex); //added by WS 05/14/05 46 struct Association_Request_Frame_Body* msgBody, u16 iMSindex); //added by WS 05/14/05
47 47
48#ifdef _WPA2_ 48#ifdef _WPA2_
49void CreateRsnIE(PWB32_ADAPTER Adapter, u16* iFildOffset, PUCHAR msg, struct Management_Frame* msgHeader, 49void CreateRsnIE(PWB32_ADAPTER Adapter, u16* iFildOffset, u8 *msg, struct Management_Frame* msgHeader,
50 struct Association_Request_Frame_Body* msgBody, u16 iMSindex);//added by WS 05/14/05 50 struct Association_Request_Frame_Body* msgBody, u16 iMSindex);//added by WS 05/14/05
51 51
52u16 SearchPmkid(PWB32_ADAPTER Adapter, struct Management_Frame* msgHeader, 52u16 SearchPmkid(PWB32_ADAPTER Adapter, struct Management_Frame* msgHeader,
diff --git a/drivers/staging/winbond/ds_tkip.h b/drivers/staging/winbond/ds_tkip.h
index 29e5055b45a1..6841d66e7e8c 100644
--- a/drivers/staging/winbond/ds_tkip.h
+++ b/drivers/staging/winbond/ds_tkip.h
@@ -25,9 +25,9 @@ typedef struct tkip
25 s32 bytes_in_M; // # bytes in M 25 s32 bytes_in_M; // # bytes in M
26} tkip_t; 26} tkip_t;
27 27
28//void _append_data( PUCHAR pData, u16 size, tkip_t *p ); 28//void _append_data( u8 *pData, u16 size, tkip_t *p );
29void Mds_MicGet( void* Adapter, void* pRxLayer1, PUCHAR pKey, PUCHAR pMic ); 29void Mds_MicGet( void* Adapter, void* pRxLayer1, u8 *pKey, u8 *pMic );
30void Mds_MicFill( void* Adapter, void* pDes, PUCHAR XmitBufAddress ); 30void Mds_MicFill( void* Adapter, void* pDes, u8 *XmitBufAddress );
31 31
32 32
33 33
diff --git a/drivers/staging/winbond/linux/common.h b/drivers/staging/winbond/linux/common.h
index 6b00bad74f78..712a86cfa68b 100644
--- a/drivers/staging/winbond/linux/common.h
+++ b/drivers/staging/winbond/linux/common.h
@@ -39,14 +39,6 @@
39// Common type definition 39// Common type definition
40//=============================================================== 40//===============================================================
41 41
42typedef u8* PUCHAR;
43typedef s8* PCHAR;
44typedef u8* PBOOLEAN;
45typedef u16* PUSHORT;
46typedef u32* PULONG;
47typedef s16* PSHORT;
48
49
50//=========================================== 42//===========================================
51#define IGNORE 2 43#define IGNORE 2
52#define SUCCESS 1 44#define SUCCESS 1
@@ -110,16 +102,9 @@ typedef struct urb * PURB;
110#define OS_ATOMIC_READ( _A, _V ) _V 102#define OS_ATOMIC_READ( _A, _V ) _V
111#define OS_ATOMIC_INC( _A, _V ) EncapAtomicInc( _A, (void*)_V ) 103#define OS_ATOMIC_INC( _A, _V ) EncapAtomicInc( _A, (void*)_V )
112#define OS_ATOMIC_DEC( _A, _V ) EncapAtomicDec( _A, (void*)_V ) 104#define OS_ATOMIC_DEC( _A, _V ) EncapAtomicDec( _A, (void*)_V )
113#define OS_MEMORY_CLEAR( _A, _S ) memset( (PUCHAR)_A,0,_S) 105#define OS_MEMORY_CLEAR( _A, _S ) memset( (u8 *)_A,0,_S)
114#define OS_MEMORY_COMPARE( _A, _B, _S ) (memcmp(_A,_B,_S)? 0 : 1) // Definition is reverse with Ndis 1: the same 0: different 106#define OS_MEMORY_COMPARE( _A, _B, _S ) (memcmp(_A,_B,_S)? 0 : 1) // Definition is reverse with Ndis 1: the same 0: different
115 107
116
117#define OS_SPIN_LOCK spinlock_t
118#define OS_SPIN_LOCK_ALLOCATE( _S ) spin_lock_init( _S );
119#define OS_SPIN_LOCK_FREE( _S )
120#define OS_SPIN_LOCK_ACQUIRED( _S ) spin_lock_irq( _S )
121#define OS_SPIN_LOCK_RELEASED( _S ) spin_unlock_irq( _S );
122
123#define OS_TIMER struct timer_list 108#define OS_TIMER struct timer_list
124#define OS_TIMER_INITIAL( _T, _F, _P ) \ 109#define OS_TIMER_INITIAL( _T, _F, _P ) \
125{ \ 110{ \
diff --git a/drivers/staging/winbond/linux/wb35reg.c b/drivers/staging/winbond/linux/wb35reg.c
index 2c0b454e8cad..ebb6db5438a4 100644
--- a/drivers/staging/winbond/linux/wb35reg.c
+++ b/drivers/staging/winbond/linux/wb35reg.c
@@ -10,7 +10,7 @@ extern void phy_calibration_winbond(hw_data_t *phw_data, u32 frequency);
10// Flag : AUTO_INCREMENT - RegisterNo will auto increment 4 10// Flag : AUTO_INCREMENT - RegisterNo will auto increment 4
11// NO_INCREMENT - Function will write data into the same register 11// NO_INCREMENT - Function will write data into the same register
12unsigned char 12unsigned char
13Wb35Reg_BurstWrite(phw_data_t pHwData, u16 RegisterNo, PULONG pRegisterData, u8 NumberOfData, u8 Flag) 13Wb35Reg_BurstWrite(phw_data_t pHwData, u16 RegisterNo, u32 * pRegisterData, u8 NumberOfData, u8 Flag)
14{ 14{
15 PWB35REG pWb35Reg = &pHwData->Wb35Reg; 15 PWB35REG pWb35Reg = &pHwData->Wb35Reg;
16 PURB pUrb = NULL; 16 PURB pUrb = NULL;
@@ -30,13 +30,13 @@ Wb35Reg_BurstWrite(phw_data_t pHwData, u16 RegisterNo, PULONG pRegisterData, u8
30 if( pUrb && pRegQueue ) { 30 if( pUrb && pRegQueue ) {
31 pRegQueue->DIRECT = 2;// burst write register 31 pRegQueue->DIRECT = 2;// burst write register
32 pRegQueue->INDEX = RegisterNo; 32 pRegQueue->INDEX = RegisterNo;
33 pRegQueue->pBuffer = (PULONG)((PUCHAR)pRegQueue + sizeof(REG_QUEUE)); 33 pRegQueue->pBuffer = (u32 *)((u8 *)pRegQueue + sizeof(REG_QUEUE));
34 memcpy( pRegQueue->pBuffer, pRegisterData, DataSize ); 34 memcpy( pRegQueue->pBuffer, pRegisterData, DataSize );
35 //the function for reversing register data from little endian to big endian 35 //the function for reversing register data from little endian to big endian
36 for( i=0; i<NumberOfData ; i++ ) 36 for( i=0; i<NumberOfData ; i++ )
37 pRegQueue->pBuffer[i] = cpu_to_le32( pRegQueue->pBuffer[i] ); 37 pRegQueue->pBuffer[i] = cpu_to_le32( pRegQueue->pBuffer[i] );
38 38
39 dr = (struct usb_ctrlrequest *)((PUCHAR)pRegQueue + sizeof(REG_QUEUE) + DataSize); 39 dr = (struct usb_ctrlrequest *)((u8 *)pRegQueue + sizeof(REG_QUEUE) + DataSize);
40 dr->bRequestType = USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE; 40 dr->bRequestType = USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE;
41 dr->bRequest = 0x04; // USB or vendor-defined request code, burst mode 41 dr->bRequest = 0x04; // USB or vendor-defined request code, burst mode
42 dr->wValue = cpu_to_le16( Flag ); // 0: Register number auto-increment, 1: No auto increment 42 dr->wValue = cpu_to_le16( Flag ); // 0: Register number auto-increment, 1: No auto increment
@@ -46,14 +46,14 @@ Wb35Reg_BurstWrite(phw_data_t pHwData, u16 RegisterNo, PULONG pRegisterData, u8
46 pRegQueue->pUsbReq = dr; 46 pRegQueue->pUsbReq = dr;
47 pRegQueue->pUrb = pUrb; 47 pRegQueue->pUrb = pUrb;
48 48
49 OS_SPIN_LOCK_ACQUIRED( &pWb35Reg->EP0VM_spin_lock ); 49 spin_lock_irq( &pWb35Reg->EP0VM_spin_lock );
50 if (pWb35Reg->pRegFirst == NULL) 50 if (pWb35Reg->pRegFirst == NULL)
51 pWb35Reg->pRegFirst = pRegQueue; 51 pWb35Reg->pRegFirst = pRegQueue;
52 else 52 else
53 pWb35Reg->pRegLast->Next = pRegQueue; 53 pWb35Reg->pRegLast->Next = pRegQueue;
54 pWb35Reg->pRegLast = pRegQueue; 54 pWb35Reg->pRegLast = pRegQueue;
55 55
56 OS_SPIN_LOCK_RELEASED( &pWb35Reg->EP0VM_spin_lock ); 56 spin_unlock_irq( &pWb35Reg->EP0VM_spin_lock );
57 57
58 // Start EP0VM 58 // Start EP0VM
59 Wb35Reg_EP0VM_start(pHwData); 59 Wb35Reg_EP0VM_start(pHwData);
@@ -181,7 +181,7 @@ Wb35Reg_Write( phw_data_t pHwData, u16 RegisterNo, u32 RegisterValue )
181 pRegQueue->INDEX = RegisterNo; 181 pRegQueue->INDEX = RegisterNo;
182 pRegQueue->VALUE = cpu_to_le32(RegisterValue); 182 pRegQueue->VALUE = cpu_to_le32(RegisterValue);
183 pRegQueue->RESERVED_VALID = FALSE; 183 pRegQueue->RESERVED_VALID = FALSE;
184 dr = (struct usb_ctrlrequest *)((PUCHAR)pRegQueue + sizeof(REG_QUEUE)); 184 dr = (struct usb_ctrlrequest *)((u8 *)pRegQueue + sizeof(REG_QUEUE));
185 dr->bRequestType = USB_TYPE_VENDOR|USB_DIR_OUT |USB_RECIP_DEVICE; 185 dr->bRequestType = USB_TYPE_VENDOR|USB_DIR_OUT |USB_RECIP_DEVICE;
186 dr->bRequest = 0x03; // USB or vendor-defined request code, burst mode 186 dr->bRequest = 0x03; // USB or vendor-defined request code, burst mode
187 dr->wValue = cpu_to_le16(0x0); 187 dr->wValue = cpu_to_le16(0x0);
@@ -193,14 +193,14 @@ Wb35Reg_Write( phw_data_t pHwData, u16 RegisterNo, u32 RegisterValue )
193 pRegQueue->pUsbReq = dr; 193 pRegQueue->pUsbReq = dr;
194 pRegQueue->pUrb = pUrb; 194 pRegQueue->pUrb = pUrb;
195 195
196 OS_SPIN_LOCK_ACQUIRED(&pWb35Reg->EP0VM_spin_lock ); 196 spin_lock_irq(&pWb35Reg->EP0VM_spin_lock );
197 if (pWb35Reg->pRegFirst == NULL) 197 if (pWb35Reg->pRegFirst == NULL)
198 pWb35Reg->pRegFirst = pRegQueue; 198 pWb35Reg->pRegFirst = pRegQueue;
199 else 199 else
200 pWb35Reg->pRegLast->Next = pRegQueue; 200 pWb35Reg->pRegLast->Next = pRegQueue;
201 pWb35Reg->pRegLast = pRegQueue; 201 pWb35Reg->pRegLast = pRegQueue;
202 202
203 OS_SPIN_LOCK_RELEASED( &pWb35Reg->EP0VM_spin_lock ); 203 spin_unlock_irq( &pWb35Reg->EP0VM_spin_lock );
204 204
205 // Start EP0VM 205 // Start EP0VM
206 Wb35Reg_EP0VM_start(pHwData); 206 Wb35Reg_EP0VM_start(pHwData);
@@ -220,7 +220,7 @@ Wb35Reg_Write( phw_data_t pHwData, u16 RegisterNo, u32 RegisterValue )
220// FALSE : register not support 220// FALSE : register not support
221unsigned char 221unsigned char
222Wb35Reg_WriteWithCallbackValue( phw_data_t pHwData, u16 RegisterNo, u32 RegisterValue, 222Wb35Reg_WriteWithCallbackValue( phw_data_t pHwData, u16 RegisterNo, u32 RegisterValue,
223 PCHAR pValue, s8 Len) 223 s8 *pValue, s8 Len)
224{ 224{
225 PWB35REG pWb35Reg = &pHwData->Wb35Reg; 225 PWB35REG pWb35Reg = &pHwData->Wb35Reg;
226 struct usb_ctrlrequest *dr; 226 struct usb_ctrlrequest *dr;
@@ -243,7 +243,7 @@ Wb35Reg_WriteWithCallbackValue( phw_data_t pHwData, u16 RegisterNo, u32 Register
243 //NOTE : Users must guarantee the size of value will not exceed the buffer size. 243 //NOTE : Users must guarantee the size of value will not exceed the buffer size.
244 memcpy(pRegQueue->RESERVED, pValue, Len); 244 memcpy(pRegQueue->RESERVED, pValue, Len);
245 pRegQueue->RESERVED_VALID = TRUE; 245 pRegQueue->RESERVED_VALID = TRUE;
246 dr = (struct usb_ctrlrequest *)((PUCHAR)pRegQueue + sizeof(REG_QUEUE)); 246 dr = (struct usb_ctrlrequest *)((u8 *)pRegQueue + sizeof(REG_QUEUE));
247 dr->bRequestType = USB_TYPE_VENDOR|USB_DIR_OUT |USB_RECIP_DEVICE; 247 dr->bRequestType = USB_TYPE_VENDOR|USB_DIR_OUT |USB_RECIP_DEVICE;
248 dr->bRequest = 0x03; // USB or vendor-defined request code, burst mode 248 dr->bRequest = 0x03; // USB or vendor-defined request code, burst mode
249 dr->wValue = cpu_to_le16(0x0); 249 dr->wValue = cpu_to_le16(0x0);
@@ -254,14 +254,14 @@ Wb35Reg_WriteWithCallbackValue( phw_data_t pHwData, u16 RegisterNo, u32 Register
254 pRegQueue->Next = NULL; 254 pRegQueue->Next = NULL;
255 pRegQueue->pUsbReq = dr; 255 pRegQueue->pUsbReq = dr;
256 pRegQueue->pUrb = pUrb; 256 pRegQueue->pUrb = pUrb;
257 OS_SPIN_LOCK_ACQUIRED (&pWb35Reg->EP0VM_spin_lock ); 257 spin_lock_irq (&pWb35Reg->EP0VM_spin_lock );
258 if( pWb35Reg->pRegFirst == NULL ) 258 if( pWb35Reg->pRegFirst == NULL )
259 pWb35Reg->pRegFirst = pRegQueue; 259 pWb35Reg->pRegFirst = pRegQueue;
260 else 260 else
261 pWb35Reg->pRegLast->Next = pRegQueue; 261 pWb35Reg->pRegLast->Next = pRegQueue;
262 pWb35Reg->pRegLast = pRegQueue; 262 pWb35Reg->pRegLast = pRegQueue;
263 263
264 OS_SPIN_LOCK_RELEASED ( &pWb35Reg->EP0VM_spin_lock ); 264 spin_unlock_irq ( &pWb35Reg->EP0VM_spin_lock );
265 265
266 // Start EP0VM 266 // Start EP0VM
267 Wb35Reg_EP0VM_start(pHwData); 267 Wb35Reg_EP0VM_start(pHwData);
@@ -278,10 +278,10 @@ Wb35Reg_WriteWithCallbackValue( phw_data_t pHwData, u16 RegisterNo, u32 Register
278// FALSE : register not support 278// FALSE : register not support
279// pRegisterValue : It must be a resident buffer due to asynchronous read register. 279// pRegisterValue : It must be a resident buffer due to asynchronous read register.
280unsigned char 280unsigned char
281Wb35Reg_ReadSync( phw_data_t pHwData, u16 RegisterNo, PULONG pRegisterValue ) 281Wb35Reg_ReadSync( phw_data_t pHwData, u16 RegisterNo, u32 * pRegisterValue )
282{ 282{
283 PWB35REG pWb35Reg = &pHwData->Wb35Reg; 283 PWB35REG pWb35Reg = &pHwData->Wb35Reg;
284 PULONG pltmp = pRegisterValue; 284 u32 * pltmp = pRegisterValue;
285 int ret = -1; 285 int ret = -1;
286 286
287 // Module shutdown 287 // Module shutdown
@@ -327,7 +327,7 @@ Wb35Reg_ReadSync( phw_data_t pHwData, u16 RegisterNo, PULONG pRegisterValue
327// FALSE : register not support 327// FALSE : register not support
328// pRegisterValue : It must be a resident buffer due to asynchronous read register. 328// pRegisterValue : It must be a resident buffer due to asynchronous read register.
329unsigned char 329unsigned char
330Wb35Reg_Read(phw_data_t pHwData, u16 RegisterNo, PULONG pRegisterValue ) 330Wb35Reg_Read(phw_data_t pHwData, u16 RegisterNo, u32 * pRegisterValue )
331{ 331{
332 PWB35REG pWb35Reg = &pHwData->Wb35Reg; 332 PWB35REG pWb35Reg = &pHwData->Wb35Reg;
333 struct usb_ctrlrequest * dr; 333 struct usb_ctrlrequest * dr;
@@ -348,7 +348,7 @@ Wb35Reg_Read(phw_data_t pHwData, u16 RegisterNo, PULONG pRegisterValue )
348 pRegQueue->DIRECT = 0;// read register 348 pRegQueue->DIRECT = 0;// read register
349 pRegQueue->INDEX = RegisterNo; 349 pRegQueue->INDEX = RegisterNo;
350 pRegQueue->pBuffer = pRegisterValue; 350 pRegQueue->pBuffer = pRegisterValue;
351 dr = (struct usb_ctrlrequest *)((PUCHAR)pRegQueue + sizeof(REG_QUEUE)); 351 dr = (struct usb_ctrlrequest *)((u8 *)pRegQueue + sizeof(REG_QUEUE));
352 dr->bRequestType = USB_TYPE_VENDOR|USB_RECIP_DEVICE|USB_DIR_IN; 352 dr->bRequestType = USB_TYPE_VENDOR|USB_RECIP_DEVICE|USB_DIR_IN;
353 dr->bRequest = 0x01; // USB or vendor-defined request code, burst mode 353 dr->bRequest = 0x01; // USB or vendor-defined request code, burst mode
354 dr->wValue = cpu_to_le16(0x0); 354 dr->wValue = cpu_to_le16(0x0);
@@ -359,14 +359,14 @@ Wb35Reg_Read(phw_data_t pHwData, u16 RegisterNo, PULONG pRegisterValue )
359 pRegQueue->Next = NULL; 359 pRegQueue->Next = NULL;
360 pRegQueue->pUsbReq = dr; 360 pRegQueue->pUsbReq = dr;
361 pRegQueue->pUrb = pUrb; 361 pRegQueue->pUrb = pUrb;
362 OS_SPIN_LOCK_ACQUIRED ( &pWb35Reg->EP0VM_spin_lock ); 362 spin_lock_irq ( &pWb35Reg->EP0VM_spin_lock );
363 if( pWb35Reg->pRegFirst == NULL ) 363 if( pWb35Reg->pRegFirst == NULL )
364 pWb35Reg->pRegFirst = pRegQueue; 364 pWb35Reg->pRegFirst = pRegQueue;
365 else 365 else
366 pWb35Reg->pRegLast->Next = pRegQueue; 366 pWb35Reg->pRegLast->Next = pRegQueue;
367 pWb35Reg->pRegLast = pRegQueue; 367 pWb35Reg->pRegLast = pRegQueue;
368 368
369 OS_SPIN_LOCK_RELEASED( &pWb35Reg->EP0VM_spin_lock ); 369 spin_unlock_irq( &pWb35Reg->EP0VM_spin_lock );
370 370
371 // Start EP0VM 371 // Start EP0VM
372 Wb35Reg_EP0VM_start( pHwData ); 372 Wb35Reg_EP0VM_start( pHwData );
@@ -399,7 +399,7 @@ Wb35Reg_EP0VM(phw_data_t pHwData )
399 PWB35REG pWb35Reg = &pHwData->Wb35Reg; 399 PWB35REG pWb35Reg = &pHwData->Wb35Reg;
400 PURB pUrb; 400 PURB pUrb;
401 struct usb_ctrlrequest *dr; 401 struct usb_ctrlrequest *dr;
402 PULONG pBuffer; 402 u32 * pBuffer;
403 int ret = -1; 403 int ret = -1;
404 PREG_QUEUE pRegQueue; 404 PREG_QUEUE pRegQueue;
405 405
@@ -411,9 +411,9 @@ Wb35Reg_EP0VM(phw_data_t pHwData )
411 goto cleanup; 411 goto cleanup;
412 412
413 // Get the register data and send to USB through Irp 413 // Get the register data and send to USB through Irp
414 OS_SPIN_LOCK_ACQUIRED( &pWb35Reg->EP0VM_spin_lock ); 414 spin_lock_irq( &pWb35Reg->EP0VM_spin_lock );
415 pRegQueue = pWb35Reg->pRegFirst; 415 pRegQueue = pWb35Reg->pRegFirst;
416 OS_SPIN_LOCK_RELEASED( &pWb35Reg->EP0VM_spin_lock ); 416 spin_unlock_irq( &pWb35Reg->EP0VM_spin_lock );
417 417
418 if (!pRegQueue) 418 if (!pRegQueue)
419 goto cleanup; 419 goto cleanup;
@@ -429,7 +429,7 @@ Wb35Reg_EP0VM(phw_data_t pHwData )
429 429
430 usb_fill_control_urb( pUrb, pHwData->WbUsb.udev, 430 usb_fill_control_urb( pUrb, pHwData->WbUsb.udev,
431 REG_DIRECTION(pHwData->WbUsb.udev,pRegQueue), 431 REG_DIRECTION(pHwData->WbUsb.udev,pRegQueue),
432 (PUCHAR)dr,pBuffer,cpu_to_le16(dr->wLength), 432 (u8 *)dr,pBuffer,cpu_to_le16(dr->wLength),
433 Wb35Reg_EP0VM_complete, (void*)pHwData); 433 Wb35Reg_EP0VM_complete, (void*)pHwData);
434 434
435 pWb35Reg->EP0vm_state = VM_RUNNING; 435 pWb35Reg->EP0vm_state = VM_RUNNING;
@@ -468,12 +468,12 @@ Wb35Reg_EP0VM_complete(PURB pUrb)
468 OS_ATOMIC_DEC( pHwData->Adapter, &pWb35Reg->RegFireCount ); 468 OS_ATOMIC_DEC( pHwData->Adapter, &pWb35Reg->RegFireCount );
469 } else { 469 } else {
470 // Complete to send, remove the URB from the first 470 // Complete to send, remove the URB from the first
471 OS_SPIN_LOCK_ACQUIRED( &pWb35Reg->EP0VM_spin_lock ); 471 spin_lock_irq( &pWb35Reg->EP0VM_spin_lock );
472 pRegQueue = pWb35Reg->pRegFirst; 472 pRegQueue = pWb35Reg->pRegFirst;
473 if (pRegQueue == pWb35Reg->pRegLast) 473 if (pRegQueue == pWb35Reg->pRegLast)
474 pWb35Reg->pRegLast = NULL; 474 pWb35Reg->pRegLast = NULL;
475 pWb35Reg->pRegFirst = pWb35Reg->pRegFirst->Next; 475 pWb35Reg->pRegFirst = pWb35Reg->pRegFirst->Next;
476 OS_SPIN_LOCK_RELEASED( &pWb35Reg->EP0VM_spin_lock ); 476 spin_unlock_irq( &pWb35Reg->EP0VM_spin_lock );
477 477
478 if (pWb35Reg->EP0VM_status) { 478 if (pWb35Reg->EP0VM_status) {
479#ifdef _PE_REG_DUMP_ 479#ifdef _PE_REG_DUMP_
@@ -513,7 +513,7 @@ Wb35Reg_destroy(phw_data_t pHwData)
513 OS_SLEEP(10000); // Delay for waiting function enter 940623.1.b 513 OS_SLEEP(10000); // Delay for waiting function enter 940623.1.b
514 514
515 // Release all the data in RegQueue 515 // Release all the data in RegQueue
516 OS_SPIN_LOCK_ACQUIRED( &pWb35Reg->EP0VM_spin_lock ); 516 spin_lock_irq( &pWb35Reg->EP0VM_spin_lock );
517 pRegQueue = pWb35Reg->pRegFirst; 517 pRegQueue = pWb35Reg->pRegFirst;
518 while (pRegQueue) { 518 while (pRegQueue) {
519 if (pRegQueue == pWb35Reg->pRegLast) 519 if (pRegQueue == pWb35Reg->pRegLast)
@@ -521,7 +521,7 @@ Wb35Reg_destroy(phw_data_t pHwData)
521 pWb35Reg->pRegFirst = pWb35Reg->pRegFirst->Next; 521 pWb35Reg->pRegFirst = pWb35Reg->pRegFirst->Next;
522 522
523 pUrb = pRegQueue->pUrb; 523 pUrb = pRegQueue->pUrb;
524 OS_SPIN_LOCK_RELEASED( &pWb35Reg->EP0VM_spin_lock ); 524 spin_unlock_irq( &pWb35Reg->EP0VM_spin_lock );
525 if (pUrb) { 525 if (pUrb) {
526 usb_free_urb(pUrb); 526 usb_free_urb(pUrb);
527 kfree(pRegQueue); 527 kfree(pRegQueue);
@@ -530,14 +530,11 @@ Wb35Reg_destroy(phw_data_t pHwData)
530 WBDEBUG(("EP0 queue release error\n")); 530 WBDEBUG(("EP0 queue release error\n"));
531 #endif 531 #endif
532 } 532 }
533 OS_SPIN_LOCK_ACQUIRED( &pWb35Reg->EP0VM_spin_lock ); 533 spin_lock_irq( &pWb35Reg->EP0VM_spin_lock );
534 534
535 pRegQueue = pWb35Reg->pRegFirst; 535 pRegQueue = pWb35Reg->pRegFirst;
536 } 536 }
537 OS_SPIN_LOCK_RELEASED( &pWb35Reg->EP0VM_spin_lock ); 537 spin_unlock_irq( &pWb35Reg->EP0VM_spin_lock );
538
539 // Free resource
540 OS_SPIN_LOCK_FREE( &pWb35Reg->EP0VM_spin_lock );
541} 538}
542 539
543//==================================================================================== 540//====================================================================================
@@ -550,7 +547,7 @@ unsigned char Wb35Reg_initial(phw_data_t pHwData)
550 u32 SoftwareSet, VCO_trim, TxVga, Region_ScanInterval; 547 u32 SoftwareSet, VCO_trim, TxVga, Region_ScanInterval;
551 548
552 // Spin lock is acquired for read and write IRP command 549 // Spin lock is acquired for read and write IRP command
553 OS_SPIN_LOCK_ALLOCATE( &pWb35Reg->EP0VM_spin_lock ); 550 spin_lock_init( &pWb35Reg->EP0VM_spin_lock );
554 551
555 // Getting RF module type from EEPROM ------------------------------------ 552 // Getting RF module type from EEPROM ------------------------------------
556 Wb35Reg_WriteSync( pHwData, 0x03b4, 0x080d0000 ); // Start EEPROM access + Read + address(0x0d) 553 Wb35Reg_WriteSync( pHwData, 0x03b4, 0x080d0000 ); // Start EEPROM access + Read + address(0x0d)
@@ -655,7 +652,7 @@ unsigned char Wb35Reg_initial(phw_data_t pHwData)
655// version in _GENREQ.ASM of the DWB NE1000/2000 driver. 652// version in _GENREQ.ASM of the DWB NE1000/2000 driver.
656//================================================================================== 653//==================================================================================
657u32 654u32
658CardComputeCrc(PUCHAR Buffer, u32 Length) 655CardComputeCrc(u8 * Buffer, u32 Length)
659{ 656{
660 u32 Crc, Carry; 657 u32 Crc, Carry;
661 u32 i, j; 658 u32 i, j;
diff --git a/drivers/staging/winbond/linux/wb35reg_f.h b/drivers/staging/winbond/linux/wb35reg_f.h
index 38e2906b51a7..3006cfe99ccd 100644
--- a/drivers/staging/winbond/linux/wb35reg_f.h
+++ b/drivers/staging/winbond/linux/wb35reg_f.h
@@ -29,16 +29,16 @@ void EEPROMTxVgaAdjust( phw_data_t pHwData ); // 20060619.5 Add
29 29
30void Wb35Reg_destroy( phw_data_t pHwData ); 30void Wb35Reg_destroy( phw_data_t pHwData );
31 31
32unsigned char Wb35Reg_Read( phw_data_t pHwData, u16 RegisterNo, PULONG pRegisterValue ); 32unsigned char Wb35Reg_Read( phw_data_t pHwData, u16 RegisterNo, u32 * pRegisterValue );
33unsigned char Wb35Reg_ReadSync( phw_data_t pHwData, u16 RegisterNo, PULONG pRegisterValue ); 33unsigned char Wb35Reg_ReadSync( phw_data_t pHwData, u16 RegisterNo, u32 * pRegisterValue );
34unsigned char Wb35Reg_Write( phw_data_t pHwData, u16 RegisterNo, u32 RegisterValue ); 34unsigned char Wb35Reg_Write( phw_data_t pHwData, u16 RegisterNo, u32 RegisterValue );
35unsigned char Wb35Reg_WriteSync( phw_data_t pHwData, u16 RegisterNo, u32 RegisterValue ); 35unsigned char Wb35Reg_WriteSync( phw_data_t pHwData, u16 RegisterNo, u32 RegisterValue );
36unsigned char Wb35Reg_WriteWithCallbackValue( phw_data_t pHwData, 36unsigned char Wb35Reg_WriteWithCallbackValue( phw_data_t pHwData,
37 u16 RegisterNo, 37 u16 RegisterNo,
38 u32 RegisterValue, 38 u32 RegisterValue,
39 PCHAR pValue, 39 s8 *pValue,
40 s8 Len); 40 s8 Len);
41unsigned char Wb35Reg_BurstWrite( phw_data_t pHwData, u16 RegisterNo, PULONG pRegisterData, u8 NumberOfData, u8 Flag ); 41unsigned char Wb35Reg_BurstWrite( phw_data_t pHwData, u16 RegisterNo, u32 * pRegisterData, u8 NumberOfData, u8 Flag );
42 42
43void Wb35Reg_EP0VM( phw_data_t pHwData ); 43void Wb35Reg_EP0VM( phw_data_t pHwData );
44void Wb35Reg_EP0VM_start( phw_data_t pHwData ); 44void Wb35Reg_EP0VM_start( phw_data_t pHwData );
@@ -47,7 +47,7 @@ void Wb35Reg_EP0VM_complete( PURB pUrb );
47u32 BitReverse( u32 dwData, u32 DataLength); 47u32 BitReverse( u32 dwData, u32 DataLength);
48 48
49void CardGetMulticastBit( u8 Address[MAC_ADDR_LENGTH], u8 *Byte, u8 *Value ); 49void CardGetMulticastBit( u8 Address[MAC_ADDR_LENGTH], u8 *Byte, u8 *Value );
50u32 CardComputeCrc( PUCHAR Buffer, u32 Length ); 50u32 CardComputeCrc( u8 * Buffer, u32 Length );
51 51
52void Wb35Reg_phy_calibration( phw_data_t pHwData ); 52void Wb35Reg_phy_calibration( phw_data_t pHwData );
53void Wb35Reg_Update( phw_data_t pHwData, u16 RegisterNo, u32 RegisterValue ); 53void Wb35Reg_Update( phw_data_t pHwData, u16 RegisterNo, u32 RegisterValue );
diff --git a/drivers/staging/winbond/linux/wb35reg_s.h b/drivers/staging/winbond/linux/wb35reg_s.h
index a7595b1e7336..8b35b93f7f02 100644
--- a/drivers/staging/winbond/linux/wb35reg_s.h
+++ b/drivers/staging/winbond/linux/wb35reg_s.h
@@ -75,7 +75,7 @@ typedef struct _REG_QUEUE
75 union 75 union
76 { 76 {
77 u32 VALUE; 77 u32 VALUE;
78 PULONG pBuffer; 78 u32 * pBuffer;
79 }; 79 };
80 u8 RESERVED[4];// space reserved for communication 80 u8 RESERVED[4];// space reserved for communication
81 81
@@ -143,7 +143,7 @@ typedef struct _WB35REG
143 //------------------- 143 //-------------------
144 // VM 144 // VM
145 //------------------- 145 //-------------------
146 OS_SPIN_LOCK EP0VM_spin_lock; // 4B 146 spinlock_t EP0VM_spin_lock; // 4B
147 u32 EP0VM_status;//$$ 147 u32 EP0VM_status;//$$
148 PREG_QUEUE pRegFirst; 148 PREG_QUEUE pRegFirst;
149 PREG_QUEUE pRegLast; 149 PREG_QUEUE pRegLast;
diff --git a/drivers/staging/winbond/linux/wb35rx.c b/drivers/staging/winbond/linux/wb35rx.c
index 26157eb3d5a2..b4b9f5f371d9 100644
--- a/drivers/staging/winbond/linux/wb35rx.c
+++ b/drivers/staging/winbond/linux/wb35rx.c
@@ -27,7 +27,7 @@ void Wb35Rx_start(phw_data_t pHwData)
27void Wb35Rx( phw_data_t pHwData ) 27void Wb35Rx( phw_data_t pHwData )
28{ 28{
29 PWB35RX pWb35Rx = &pHwData->Wb35Rx; 29 PWB35RX pWb35Rx = &pHwData->Wb35Rx;
30 PUCHAR pRxBufferAddress; 30 u8 * pRxBufferAddress;
31 PURB pUrb = (PURB)pWb35Rx->RxUrb; 31 PURB pUrb = (PURB)pWb35Rx->RxUrb;
32 int retv; 32 int retv;
33 u32 RxBufferId; 33 u32 RxBufferId;
@@ -35,51 +35,50 @@ void Wb35Rx( phw_data_t pHwData )
35 // 35 //
36 // Issuing URB 36 // Issuing URB
37 // 37 //
38 do { 38 if (pHwData->SurpriseRemove || pHwData->HwStop)
39 if (pHwData->SurpriseRemove || pHwData->HwStop) 39 goto error;
40 break;
41 40
42 if (pWb35Rx->rx_halt) 41 if (pWb35Rx->rx_halt)
43 break; 42 goto error;
44 43
45 // Get RxBuffer's ID 44 // Get RxBuffer's ID
46 RxBufferId = pWb35Rx->RxBufferId; 45 RxBufferId = pWb35Rx->RxBufferId;
47 if (!pWb35Rx->RxOwner[RxBufferId]) { 46 if (!pWb35Rx->RxOwner[RxBufferId]) {
48 // It's impossible to run here. 47 // It's impossible to run here.
49 #ifdef _PE_RX_DUMP_ 48 #ifdef _PE_RX_DUMP_
50 WBDEBUG(("Rx driver fifo unavailable\n")); 49 WBDEBUG(("Rx driver fifo unavailable\n"));
51 #endif 50 #endif
52 break; 51 goto error;
53 } 52 }
54 53
55 // Update buffer point, then start to bulkin the data from USB 54 // Update buffer point, then start to bulkin the data from USB
56 pWb35Rx->RxBufferId++; 55 pWb35Rx->RxBufferId++;
57 pWb35Rx->RxBufferId %= MAX_USB_RX_BUFFER_NUMBER; 56 pWb35Rx->RxBufferId %= MAX_USB_RX_BUFFER_NUMBER;
58 57
59 pWb35Rx->CurrentRxBufferId = RxBufferId; 58 pWb35Rx->CurrentRxBufferId = RxBufferId;
60 59
61 if (1 != OS_MEMORY_ALLOC((void* *)&pWb35Rx->pDRx, MAX_USB_RX_BUFFER)) { 60 if (1 != OS_MEMORY_ALLOC((void* *)&pWb35Rx->pDRx, MAX_USB_RX_BUFFER)) {
62 printk("w35und: Rx memory alloc failed\n"); 61 printk("w35und: Rx memory alloc failed\n");
63 break; 62 goto error;
64 } 63 }
65 pRxBufferAddress = pWb35Rx->pDRx; 64 pRxBufferAddress = pWb35Rx->pDRx;
66 65
67 usb_fill_bulk_urb(pUrb, pHwData->WbUsb.udev, 66 usb_fill_bulk_urb(pUrb, pHwData->WbUsb.udev,
68 usb_rcvbulkpipe(pHwData->WbUsb.udev, 3), 67 usb_rcvbulkpipe(pHwData->WbUsb.udev, 3),
69 pRxBufferAddress, MAX_USB_RX_BUFFER, 68 pRxBufferAddress, MAX_USB_RX_BUFFER,
70 Wb35Rx_Complete, pHwData); 69 Wb35Rx_Complete, pHwData);
71 70
72 pWb35Rx->EP3vm_state = VM_RUNNING; 71 pWb35Rx->EP3vm_state = VM_RUNNING;
73 72
74 retv = wb_usb_submit_urb(pUrb); 73 retv = wb_usb_submit_urb(pUrb);
75 74
76 if (retv != 0) { 75 if (retv != 0) {
77 printk("Rx URB sending error\n"); 76 printk("Rx URB sending error\n");
78 break; 77 goto error;
79 } 78 }
80 return; 79 return;
81 } while(FALSE);
82 80
81error:
83 // VM stop 82 // VM stop
84 pWb35Rx->EP3vm_state = VM_STOP; 83 pWb35Rx->EP3vm_state = VM_STOP;
85 OS_ATOMIC_DEC( pHwData->Adapter, &pWb35Rx->RxFireCounter ); 84 OS_ATOMIC_DEC( pHwData->Adapter, &pWb35Rx->RxFireCounter );
@@ -89,7 +88,7 @@ void Wb35Rx_Complete(PURB pUrb)
89{ 88{
90 phw_data_t pHwData = pUrb->context; 89 phw_data_t pHwData = pUrb->context;
91 PWB35RX pWb35Rx = &pHwData->Wb35Rx; 90 PWB35RX pWb35Rx = &pHwData->Wb35Rx;
92 PUCHAR pRxBufferAddress; 91 u8 * pRxBufferAddress;
93 u32 SizeCheck; 92 u32 SizeCheck;
94 u16 BulkLength; 93 u16 BulkLength;
95 u32 RxBufferId; 94 u32 RxBufferId;
@@ -99,65 +98,63 @@ void Wb35Rx_Complete(PURB pUrb)
99 pWb35Rx->EP3vm_state = VM_COMPLETED; 98 pWb35Rx->EP3vm_state = VM_COMPLETED;
100 pWb35Rx->EP3VM_status = pUrb->status;//Store the last result of Irp 99 pWb35Rx->EP3VM_status = pUrb->status;//Store the last result of Irp
101 100
102 do { 101 RxBufferId = pWb35Rx->CurrentRxBufferId;
103 RxBufferId = pWb35Rx->CurrentRxBufferId;
104 102
105 pRxBufferAddress = pWb35Rx->pDRx; 103 pRxBufferAddress = pWb35Rx->pDRx;
106 BulkLength = (u16)pUrb->actual_length; 104 BulkLength = (u16)pUrb->actual_length;
107 105
108 // The IRP is completed 106 // The IRP is completed
109 pWb35Rx->EP3vm_state = VM_COMPLETED; 107 pWb35Rx->EP3vm_state = VM_COMPLETED;
110 108
111 if (pHwData->SurpriseRemove || pHwData->HwStop) // Must be here, or RxBufferId is invalid 109 if (pHwData->SurpriseRemove || pHwData->HwStop) // Must be here, or RxBufferId is invalid
112 break; 110 goto error;
113 111
114 if (pWb35Rx->rx_halt) 112 if (pWb35Rx->rx_halt)
115 break; 113 goto error;
116 114
117 // Start to process the data only in successful condition 115 // Start to process the data only in successful condition
118 pWb35Rx->RxOwner[ RxBufferId ] = 0; // Set the owner to driver 116 pWb35Rx->RxOwner[ RxBufferId ] = 0; // Set the owner to driver
119 R00.value = le32_to_cpu(*(PULONG)pRxBufferAddress); 117 R00.value = le32_to_cpu(*(u32 *)pRxBufferAddress);
120 118
121 // The URB is completed, check the result 119 // The URB is completed, check the result
122 if (pWb35Rx->EP3VM_status != 0) { 120 if (pWb35Rx->EP3VM_status != 0) {
123 #ifdef _PE_USB_STATE_DUMP_ 121 #ifdef _PE_USB_STATE_DUMP_
124 WBDEBUG(("EP3 IoCompleteRoutine return error\n")); 122 WBDEBUG(("EP3 IoCompleteRoutine return error\n"));
125 DebugUsbdStatusInformation( pWb35Rx->EP3VM_status ); 123 DebugUsbdStatusInformation( pWb35Rx->EP3VM_status );
126 #endif 124 #endif
127 pWb35Rx->EP3vm_state = VM_STOP; 125 pWb35Rx->EP3vm_state = VM_STOP;
128 break; 126 goto error;
129 } 127 }
130 128
131 // 20060220 For recovering. check if operating in single USB mode 129 // 20060220 For recovering. check if operating in single USB mode
132 if (!HAL_USB_MODE_BURST(pHwData)) { 130 if (!HAL_USB_MODE_BURST(pHwData)) {
133 SizeCheck = R00.R00_receive_byte_count; //20060926 anson's endian 131 SizeCheck = R00.R00_receive_byte_count; //20060926 anson's endian
134 if ((SizeCheck & 0x03) > 0) 132 if ((SizeCheck & 0x03) > 0)
135 SizeCheck -= 4; 133 SizeCheck -= 4;
136 SizeCheck = (SizeCheck + 3) & ~0x03; 134 SizeCheck = (SizeCheck + 3) & ~0x03;
137 SizeCheck += 12; // 8 + 4 badbeef 135 SizeCheck += 12; // 8 + 4 badbeef
138 if ((BulkLength > 1600) || 136 if ((BulkLength > 1600) ||
139 (SizeCheck > 1600) || 137 (SizeCheck > 1600) ||
140 (BulkLength != SizeCheck) || 138 (BulkLength != SizeCheck) ||
141 (BulkLength == 0)) { // Add for fail Urb 139 (BulkLength == 0)) { // Add for fail Urb
142 pWb35Rx->EP3vm_state = VM_STOP; 140 pWb35Rx->EP3vm_state = VM_STOP;
143 pWb35Rx->Ep3ErrorCount2++; 141 pWb35Rx->Ep3ErrorCount2++;
144 }
145 } 142 }
143 }
146 144
147 // Indicating the receiving data 145 // Indicating the receiving data
148 pWb35Rx->ByteReceived += BulkLength; 146 pWb35Rx->ByteReceived += BulkLength;
149 pWb35Rx->RxBufferSize[ RxBufferId ] = BulkLength; 147 pWb35Rx->RxBufferSize[ RxBufferId ] = BulkLength;
150
151 if (!pWb35Rx->RxOwner[ RxBufferId ])
152 Wb35Rx_indicate(pHwData);
153 148
154 kfree(pWb35Rx->pDRx); 149 if (!pWb35Rx->RxOwner[ RxBufferId ])
155 // Do the next receive 150 Wb35Rx_indicate(pHwData);
156 Wb35Rx(pHwData);
157 return;
158 151
159 } while(FALSE); 152 kfree(pWb35Rx->pDRx);
153 // Do the next receive
154 Wb35Rx(pHwData);
155 return;
160 156
157error:
161 pWb35Rx->RxOwner[ RxBufferId ] = 1; // Set the owner to hardware 158 pWb35Rx->RxOwner[ RxBufferId ] = 1; // Set the owner to hardware
162 OS_ATOMIC_DEC( pHwData->Adapter, &pWb35Rx->RxFireCounter ); 159 OS_ATOMIC_DEC( pHwData->Adapter, &pWb35Rx->RxFireCounter );
163 pWb35Rx->EP3vm_state = VM_STOP; 160 pWb35Rx->EP3vm_state = VM_STOP;
@@ -223,7 +220,7 @@ void Wb35Rx_reset_descriptor( phw_data_t pHwData )
223 220
224void Wb35Rx_adjust(PDESCRIPTOR pRxDes) 221void Wb35Rx_adjust(PDESCRIPTOR pRxDes)
225{ 222{
226 PULONG pRxBufferAddress; 223 u32 * pRxBufferAddress;
227 u32 DecryptionMethod; 224 u32 DecryptionMethod;
228 u32 i; 225 u32 i;
229 u16 BufferSize; 226 u16 BufferSize;
@@ -264,7 +261,7 @@ u16 Wb35Rx_indicate(phw_data_t pHwData)
264{ 261{
265 DESCRIPTOR RxDes; 262 DESCRIPTOR RxDes;
266 PWB35RX pWb35Rx = &pHwData->Wb35Rx; 263 PWB35RX pWb35Rx = &pHwData->Wb35Rx;
267 PUCHAR pRxBufferAddress; 264 u8 * pRxBufferAddress;
268 u16 PacketSize; 265 u16 PacketSize;
269 u16 stmp, BufferSize, stmp2 = 0; 266 u16 stmp, BufferSize, stmp2 = 0;
270 u32 RxBufferId; 267 u32 RxBufferId;
@@ -283,13 +280,13 @@ u16 Wb35Rx_indicate(phw_data_t pHwData)
283 280
284 // Parse the bulkin buffer 281 // Parse the bulkin buffer
285 while (BufferSize >= 4) { 282 while (BufferSize >= 4) {
286 if ((cpu_to_le32(*(PULONG)pRxBufferAddress) & 0x0fffffff) == RX_END_TAG) //Is ending? 921002.9.a 283 if ((cpu_to_le32(*(u32 *)pRxBufferAddress) & 0x0fffffff) == RX_END_TAG) //Is ending? 921002.9.a
287 break; 284 break;
288 285
289 // Get the R00 R01 first 286 // Get the R00 R01 first
290 RxDes.R00.value = le32_to_cpu(*(PULONG)pRxBufferAddress); 287 RxDes.R00.value = le32_to_cpu(*(u32 *)pRxBufferAddress);
291 PacketSize = (u16)RxDes.R00.R00_receive_byte_count; 288 PacketSize = (u16)RxDes.R00.R00_receive_byte_count;
292 RxDes.R01.value = le32_to_cpu(*((PULONG)(pRxBufferAddress+4))); 289 RxDes.R01.value = le32_to_cpu(*((u32 *)(pRxBufferAddress+4)));
293 // For new DMA 4k 290 // For new DMA 4k
294 if ((PacketSize & 0x03) > 0) 291 if ((PacketSize & 0x03) > 0)
295 PacketSize -= 4; 292 PacketSize -= 4;
diff --git a/drivers/staging/winbond/linux/wb35rx_s.h b/drivers/staging/winbond/linux/wb35rx_s.h
index 53b831fdeb78..b90c269e6adb 100644
--- a/drivers/staging/winbond/linux/wb35rx_s.h
+++ b/drivers/staging/winbond/linux/wb35rx_s.h
@@ -41,7 +41,7 @@ typedef struct _WB35RX
41 u32 Ep3ErrorCount2; // 20060625.1 Usbd for Rx DMA error count 41 u32 Ep3ErrorCount2; // 20060625.1 Usbd for Rx DMA error count
42 42
43 int EP3VM_status; 43 int EP3VM_status;
44 PUCHAR pDRx; 44 u8 * pDRx;
45 45
46} WB35RX, *PWB35RX; 46} WB35RX, *PWB35RX;
47 47
diff --git a/drivers/staging/winbond/linux/wb35tx.c b/drivers/staging/winbond/linux/wb35tx.c
index cf19c3bc524a..ba9d51244e29 100644
--- a/drivers/staging/winbond/linux/wb35tx.c
+++ b/drivers/staging/winbond/linux/wb35tx.c
@@ -12,7 +12,7 @@
12 12
13 13
14unsigned char 14unsigned char
15Wb35Tx_get_tx_buffer(phw_data_t pHwData, PUCHAR *pBuffer ) 15Wb35Tx_get_tx_buffer(phw_data_t pHwData, u8 **pBuffer)
16{ 16{
17 PWB35TX pWb35Tx = &pHwData->Wb35Tx; 17 PWB35TX pWb35Tx = &pHwData->Wb35Tx;
18 18
@@ -37,7 +37,7 @@ void Wb35Tx(phw_data_t pHwData)
37{ 37{
38 PWB35TX pWb35Tx = &pHwData->Wb35Tx; 38 PWB35TX pWb35Tx = &pHwData->Wb35Tx;
39 PADAPTER Adapter = pHwData->Adapter; 39 PADAPTER Adapter = pHwData->Adapter;
40 PUCHAR pTxBufferAddress; 40 u8 *pTxBufferAddress;
41 PMDS pMds = &Adapter->Mds; 41 PMDS pMds = &Adapter->Mds;
42 struct urb * pUrb = (struct urb *)pWb35Tx->Tx4Urb; 42 struct urb * pUrb = (struct urb *)pWb35Tx->Tx4Urb;
43 int retv; 43 int retv;
@@ -100,25 +100,24 @@ void Wb35Tx_complete(struct urb * pUrb)
100 pWb35Tx->TxSendIndex++; 100 pWb35Tx->TxSendIndex++;
101 pWb35Tx->TxSendIndex %= MAX_USB_TX_BUFFER_NUMBER; 101 pWb35Tx->TxSendIndex %= MAX_USB_TX_BUFFER_NUMBER;
102 102
103 do { 103 if (pHwData->SurpriseRemove || pHwData->HwStop) // Let WbWlanHalt to handle surprise remove
104 if (pHwData->SurpriseRemove || pHwData->HwStop) // Let WbWlanHalt to handle surprise remove 104 goto error;
105 break;
106 105
107 if (pWb35Tx->tx_halt) 106 if (pWb35Tx->tx_halt)
108 break; 107 goto error;
109 108
110 // The URB is completed, check the result 109 // The URB is completed, check the result
111 if (pWb35Tx->EP4VM_status != 0) { 110 if (pWb35Tx->EP4VM_status != 0) {
112 printk("URB submission failed\n"); 111 printk("URB submission failed\n");
113 pWb35Tx->EP4vm_state = VM_STOP; 112 pWb35Tx->EP4vm_state = VM_STOP;
114 break; // Exit while(FALSE); 113 goto error;
115 } 114 }
116 115
117 Mds_Tx(Adapter); 116 Mds_Tx(Adapter);
118 Wb35Tx(pHwData); 117 Wb35Tx(pHwData);
119 return; 118 return;
120 } while(FALSE);
121 119
120error:
122 OS_ATOMIC_DEC( pHwData->Adapter, &pWb35Tx->TxFireCounter ); 121 OS_ATOMIC_DEC( pHwData->Adapter, &pWb35Tx->TxFireCounter );
123 pWb35Tx->EP4vm_state = VM_STOP; 122 pWb35Tx->EP4vm_state = VM_STOP;
124} 123}
@@ -225,36 +224,33 @@ void Wb35Tx_EP2VM(phw_data_t pHwData)
225{ 224{
226 PWB35TX pWb35Tx = &pHwData->Wb35Tx; 225 PWB35TX pWb35Tx = &pHwData->Wb35Tx;
227 struct urb * pUrb = (struct urb *)pWb35Tx->Tx2Urb; 226 struct urb * pUrb = (struct urb *)pWb35Tx->Tx2Urb;
228 PULONG pltmp = (PULONG)pWb35Tx->EP2_buf; 227 u32 * pltmp = (u32 *)pWb35Tx->EP2_buf;
229 int retv; 228 int retv;
230 229
231 do { 230 if (pHwData->SurpriseRemove || pHwData->HwStop)
232 if (pHwData->SurpriseRemove || pHwData->HwStop) 231 goto error;
233 break;
234
235 if (pWb35Tx->tx_halt)
236 break;
237
238 //
239 // Issuing URB
240 //
241 usb_fill_int_urb( pUrb, pHwData->WbUsb.udev, usb_rcvintpipe(pHwData->WbUsb.udev,2),
242 pltmp, MAX_INTERRUPT_LENGTH, Wb35Tx_EP2VM_complete, pHwData, 32);
243 232
244 pWb35Tx->EP2vm_state = VM_RUNNING; 233 if (pWb35Tx->tx_halt)
245 retv = wb_usb_submit_urb( pUrb ); 234 goto error;
246 235
247 if(retv < 0) { 236 //
248 #ifdef _PE_TX_DUMP_ 237 // Issuing URB
249 WBDEBUG(("EP2 Tx Irp sending error\n")); 238 //
250 #endif 239 usb_fill_int_urb( pUrb, pHwData->WbUsb.udev, usb_rcvintpipe(pHwData->WbUsb.udev,2),
251 break; 240 pltmp, MAX_INTERRUPT_LENGTH, Wb35Tx_EP2VM_complete, pHwData, 32);
252 }
253 241
254 return; 242 pWb35Tx->EP2vm_state = VM_RUNNING;
243 retv = wb_usb_submit_urb( pUrb );
255 244
256 } while(FALSE); 245 if (retv < 0) {
246 #ifdef _PE_TX_DUMP_
247 WBDEBUG(("EP2 Tx Irp sending error\n"));
248 #endif
249 goto error;
250 }
257 251
252 return;
253error:
258 pWb35Tx->EP2vm_state = VM_STOP; 254 pWb35Tx->EP2vm_state = VM_STOP;
259 OS_ATOMIC_DEC( pHwData->Adapter, &pWb35Tx->TxResultCount ); 255 OS_ATOMIC_DEC( pHwData->Adapter, &pWb35Tx->TxResultCount );
260} 256}
@@ -266,7 +262,7 @@ void Wb35Tx_EP2VM_complete(struct urb * pUrb)
266 T02_DESCRIPTOR T02, TSTATUS; 262 T02_DESCRIPTOR T02, TSTATUS;
267 PADAPTER Adapter = (PADAPTER)pHwData->Adapter; 263 PADAPTER Adapter = (PADAPTER)pHwData->Adapter;
268 PWB35TX pWb35Tx = &pHwData->Wb35Tx; 264 PWB35TX pWb35Tx = &pHwData->Wb35Tx;
269 PULONG pltmp = (PULONG)pWb35Tx->EP2_buf; 265 u32 * pltmp = (u32 *)pWb35Tx->EP2_buf;
270 u32 i; 266 u32 i;
271 u16 InterruptInLength; 267 u16 InterruptInLength;
272 268
@@ -275,38 +271,36 @@ void Wb35Tx_EP2VM_complete(struct urb * pUrb)
275 pWb35Tx->EP2vm_state = VM_COMPLETED; 271 pWb35Tx->EP2vm_state = VM_COMPLETED;
276 pWb35Tx->EP2VM_status = pUrb->status; 272 pWb35Tx->EP2VM_status = pUrb->status;
277 273
278 do { 274 // For Linux 2.4. Interrupt will always trigger
279 // For Linux 2.4. Interrupt will always trigger 275 if (pHwData->SurpriseRemove || pHwData->HwStop) // Let WbWlanHalt to handle surprise remove
280 if( pHwData->SurpriseRemove || pHwData->HwStop ) // Let WbWlanHalt to handle surprise remove 276 goto error;
281 break; 277
282 278 if (pWb35Tx->tx_halt)
283 if( pWb35Tx->tx_halt ) 279 goto error;
284 break; 280
285 281 //The Urb is completed, check the result
286 //The Urb is completed, check the result 282 if (pWb35Tx->EP2VM_status != 0) {
287 if (pWb35Tx->EP2VM_status != 0) { 283 WBDEBUG(("EP2 IoCompleteRoutine return error\n"));
288 WBDEBUG(("EP2 IoCompleteRoutine return error\n")); 284 pWb35Tx->EP2vm_state= VM_STOP;
289 pWb35Tx->EP2vm_state= VM_STOP; 285 goto error;
290 break; // Exit while(FALSE); 286 }
291 }
292
293 // Update the Tx result
294 InterruptInLength = pUrb->actual_length;
295 // Modify for minimum memory access and DWORD alignment.
296 T02.value = cpu_to_le32(pltmp[0]) >> 8; // [31:8] -> [24:0]
297 InterruptInLength -= 1;// 20051221.1.c Modify the follow for more stable
298 InterruptInLength >>= 2; // InterruptInLength/4
299 for (i=1; i<=InterruptInLength; i++) {
300 T02.value |= ((cpu_to_le32(pltmp[i]) & 0xff) << 24);
301
302 TSTATUS.value = T02.value; //20061009 anson's endian
303 Mds_SendComplete( Adapter, &TSTATUS );
304 T02.value = cpu_to_le32(pltmp[i]) >> 8;
305 }
306
307 return;
308 } while(FALSE);
309 287
288 // Update the Tx result
289 InterruptInLength = pUrb->actual_length;
290 // Modify for minimum memory access and DWORD alignment.
291 T02.value = cpu_to_le32(pltmp[0]) >> 8; // [31:8] -> [24:0]
292 InterruptInLength -= 1;// 20051221.1.c Modify the follow for more stable
293 InterruptInLength >>= 2; // InterruptInLength/4
294 for (i = 1; i <= InterruptInLength; i++) {
295 T02.value |= ((cpu_to_le32(pltmp[i]) & 0xff) << 24);
296
297 TSTATUS.value = T02.value; //20061009 anson's endian
298 Mds_SendComplete( Adapter, &TSTATUS );
299 T02.value = cpu_to_le32(pltmp[i]) >> 8;
300 }
301
302 return;
303error:
310 OS_ATOMIC_DEC( pHwData->Adapter, &pWb35Tx->TxResultCount ); 304 OS_ATOMIC_DEC( pHwData->Adapter, &pWb35Tx->TxResultCount );
311 pWb35Tx->EP2vm_state = VM_STOP; 305 pWb35Tx->EP2vm_state = VM_STOP;
312} 306}
diff --git a/drivers/staging/winbond/linux/wb35tx_f.h b/drivers/staging/winbond/linux/wb35tx_f.h
index 7705a8454dcb..107b12918137 100644
--- a/drivers/staging/winbond/linux/wb35tx_f.h
+++ b/drivers/staging/winbond/linux/wb35tx_f.h
@@ -3,7 +3,7 @@
3//==================================== 3//====================================
4unsigned char Wb35Tx_initial( phw_data_t pHwData ); 4unsigned char Wb35Tx_initial( phw_data_t pHwData );
5void Wb35Tx_destroy( phw_data_t pHwData ); 5void Wb35Tx_destroy( phw_data_t pHwData );
6unsigned char Wb35Tx_get_tx_buffer( phw_data_t pHwData, PUCHAR *pBuffer ); 6unsigned char Wb35Tx_get_tx_buffer( phw_data_t pHwData, u8 **pBuffer );
7 7
8void Wb35Tx_EP2VM( phw_data_t pHwData ); 8void Wb35Tx_EP2VM( phw_data_t pHwData );
9void Wb35Tx_EP2VM_start( phw_data_t pHwData ); 9void Wb35Tx_EP2VM_start( phw_data_t pHwData );
diff --git a/drivers/staging/winbond/linux/wbusb.c b/drivers/staging/winbond/linux/wbusb.c
index cbad5fb05959..f4a7875f2389 100644
--- a/drivers/staging/winbond/linux/wbusb.c
+++ b/drivers/staging/winbond/linux/wbusb.c
@@ -6,42 +6,29 @@
6#include "sysdef.h" 6#include "sysdef.h"
7#include <net/mac80211.h> 7#include <net/mac80211.h>
8 8
9 9MODULE_AUTHOR(DRIVER_AUTHOR);
10MODULE_AUTHOR( DRIVER_AUTHOR ); 10MODULE_DESCRIPTION(DRIVER_DESC);
11MODULE_DESCRIPTION( DRIVER_DESC );
12MODULE_LICENSE("GPL"); 11MODULE_LICENSE("GPL");
13MODULE_VERSION("0.1"); 12MODULE_VERSION("0.1");
14 13
15 14static struct usb_device_id wb35_table[] __devinitdata = {
16//============================================================ 15 {USB_DEVICE(0x0416, 0x0035)},
17// vendor ID and product ID can into here for others 16 {USB_DEVICE(0x18E8, 0x6201)},
18//============================================================ 17 {USB_DEVICE(0x18E8, 0x6206)},
19static struct usb_device_id Id_Table[] = 18 {USB_DEVICE(0x18E8, 0x6217)},
20{ 19 {USB_DEVICE(0x18E8, 0x6230)},
21 {USB_DEVICE( 0x0416, 0x0035 )}, 20 {USB_DEVICE(0x18E8, 0x6233)},
22 {USB_DEVICE( 0x18E8, 0x6201 )}, 21 {USB_DEVICE(0x1131, 0x2035)},
23 {USB_DEVICE( 0x18E8, 0x6206 )}, 22 { 0, }
24 {USB_DEVICE( 0x18E8, 0x6217 )},
25 {USB_DEVICE( 0x18E8, 0x6230 )},
26 {USB_DEVICE( 0x18E8, 0x6233 )},
27 {USB_DEVICE( 0x1131, 0x2035 )},
28 { }
29}; 23};
30 24
31MODULE_DEVICE_TABLE(usb, Id_Table); 25MODULE_DEVICE_TABLE(usb, wb35_table);
32 26
33static struct usb_driver wb35_driver = { 27static struct ieee80211_rate wbsoft_rates[] = {
34 .name = "w35und",
35 .probe = wb35_probe,
36 .disconnect = wb35_disconnect,
37 .id_table = Id_Table,
38};
39
40static const struct ieee80211_rate wbsoft_rates[] = {
41 { .bitrate = 10, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, 28 { .bitrate = 10, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
42}; 29};
43 30
44static const struct ieee80211_channel wbsoft_channels[] = { 31static struct ieee80211_channel wbsoft_channels[] = {
45 { .center_freq = 2412}, 32 { .center_freq = 2412},
46}; 33};
47 34
@@ -62,9 +49,22 @@ static void wbsoft_remove_interface(struct ieee80211_hw *dev,
62 printk("wbsoft_remove interface called\n"); 49 printk("wbsoft_remove interface called\n");
63} 50}
64 51
65static int wbsoft_nop(void) 52static void wbsoft_stop(struct ieee80211_hw *hw)
53{
54 printk(KERN_INFO "%s called\n", __func__);
55}
56
57static int wbsoft_get_stats(struct ieee80211_hw *hw,
58 struct ieee80211_low_level_stats *stats)
66{ 59{
67 printk("wbsoft_nop called\n"); 60 printk(KERN_INFO "%s called\n", __func__);
61 return 0;
62}
63
64static int wbsoft_get_tx_stats(struct ieee80211_hw *hw,
65 struct ieee80211_tx_queue_stats *stats)
66{
67 printk(KERN_INFO "%s called\n", __func__);
68 return 0; 68 return 0;
69} 69}
70 70
@@ -105,8 +105,7 @@ static void wbsoft_configure_filter(struct ieee80211_hw *dev,
105 *total_flags = new_flags; 105 *total_flags = new_flags;
106} 106}
107 107
108static int wbsoft_tx(struct ieee80211_hw *dev, struct sk_buff *skb, 108static int wbsoft_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
109 struct ieee80211_tx_control *control)
110{ 109{
111 char *buffer = kmalloc(skb->len, GFP_ATOMIC); 110 char *buffer = kmalloc(skb->len, GFP_ATOMIC);
112 printk("Sending frame %d bytes\n", skb->len); 111 printk("Sending frame %d bytes\n", skb->len);
@@ -136,7 +135,7 @@ static int wbsoft_config(struct ieee80211_hw *dev, struct ieee80211_conf *conf)
136 hal_set_current_channel(&my_adapter->sHwData, ch); 135 hal_set_current_channel(&my_adapter->sHwData, ch);
137 hal_set_beacon_period(&my_adapter->sHwData, conf->beacon_int); 136 hal_set_beacon_period(&my_adapter->sHwData, conf->beacon_int);
138// hal_set_cap_info(&my_adapter->sHwData, ?? ); 137// hal_set_cap_info(&my_adapter->sHwData, ?? );
139// hal_set_ssid(phw_data_t pHwData, PUCHAR pssid, u8 ssid_len); ?? 138// hal_set_ssid(phw_data_t pHwData, u8 * pssid, u8 ssid_len); ??
140 hal_set_accept_broadcast(&my_adapter->sHwData, 1); 139 hal_set_accept_broadcast(&my_adapter->sHwData, 1);
141 hal_set_accept_promiscuous(&my_adapter->sHwData, 1); 140 hal_set_accept_promiscuous(&my_adapter->sHwData, 1);
142 hal_set_accept_multicast(&my_adapter->sHwData, 1); 141 hal_set_accept_multicast(&my_adapter->sHwData, 1);
@@ -148,7 +147,7 @@ static int wbsoft_config(struct ieee80211_hw *dev, struct ieee80211_conf *conf)
148 147
149// hal_start_bss(&my_adapter->sHwData, WLAN_BSSTYPE_INFRASTRUCTURE); ?? 148// hal_start_bss(&my_adapter->sHwData, WLAN_BSSTYPE_INFRASTRUCTURE); ??
150 149
151//void hal_set_rates(phw_data_t pHwData, PUCHAR pbss_rates, 150//void hal_set_rates(phw_data_t pHwData, u8 * pbss_rates,
152// u8 length, unsigned char basic_rate_set) 151// u8 length, unsigned char basic_rate_set)
153 152
154 return 0; 153 return 0;
@@ -171,14 +170,14 @@ static u64 wbsoft_get_tsf(struct ieee80211_hw *dev)
171static const struct ieee80211_ops wbsoft_ops = { 170static const struct ieee80211_ops wbsoft_ops = {
172 .tx = wbsoft_tx, 171 .tx = wbsoft_tx,
173 .start = wbsoft_start, /* Start can be pretty much empty as we do WbWLanInitialize() during probe? */ 172 .start = wbsoft_start, /* Start can be pretty much empty as we do WbWLanInitialize() during probe? */
174 .stop = wbsoft_nop, 173 .stop = wbsoft_stop,
175 .add_interface = wbsoft_add_interface, 174 .add_interface = wbsoft_add_interface,
176 .remove_interface = wbsoft_remove_interface, 175 .remove_interface = wbsoft_remove_interface,
177 .config = wbsoft_config, 176 .config = wbsoft_config,
178 .config_interface = wbsoft_config_interface, 177 .config_interface = wbsoft_config_interface,
179 .configure_filter = wbsoft_configure_filter, 178 .configure_filter = wbsoft_configure_filter,
180 .get_stats = wbsoft_nop, 179 .get_stats = wbsoft_get_stats,
181 .get_tx_stats = wbsoft_nop, 180 .get_tx_stats = wbsoft_get_tx_stats,
182 .get_tsf = wbsoft_get_tsf, 181 .get_tsf = wbsoft_get_tsf,
183// conf_tx: hal_set_cwmin()/hal_set_cwmax; 182// conf_tx: hal_set_cwmin()/hal_set_cwmax;
184}; 183};
@@ -187,21 +186,6 @@ struct wbsoft_priv {
187}; 186};
188 187
189 188
190int __init wb35_init(void)
191{
192 printk("[w35und]driver init\n");
193 return usb_register(&wb35_driver);
194}
195
196void __exit wb35_exit(void)
197{
198 printk("[w35und]driver exit\n");
199 usb_deregister( &wb35_driver );
200}
201
202module_init(wb35_init);
203module_exit(wb35_exit);
204
205// Usb kernel subsystem will call this function when a new device is plugged into. 189// Usb kernel subsystem will call this function when a new device is plugged into.
206int wb35_probe(struct usb_interface *intf, const struct usb_device_id *id_table) 190int wb35_probe(struct usb_interface *intf, const struct usb_device_id *id_table)
207{ 191{
@@ -210,7 +194,7 @@ int wb35_probe(struct usb_interface *intf, const struct usb_device_id *id_table)
210 PWBUSB pWbUsb; 194 PWBUSB pWbUsb;
211 struct usb_host_interface *interface; 195 struct usb_host_interface *interface;
212 struct usb_endpoint_descriptor *endpoint; 196 struct usb_endpoint_descriptor *endpoint;
213 int i, ret = -1; 197 int ret = -1;
214 u32 ltmp; 198 u32 ltmp;
215 struct usb_device *udev = interface_to_usbdev(intf); 199 struct usb_device *udev = interface_to_usbdev(intf);
216 200
@@ -218,114 +202,95 @@ int wb35_probe(struct usb_interface *intf, const struct usb_device_id *id_table)
218 202
219 printk("[w35und]wb35_probe ->\n"); 203 printk("[w35und]wb35_probe ->\n");
220 204
221 do { 205 // 20060630.2 Check the device if it already be opened
222 for (i=0; i<(sizeof(Id_Table)/sizeof(struct usb_device_id)); i++ ) { 206 ret = usb_control_msg(udev, usb_rcvctrlpipe( udev, 0 ),
223 if ((udev->descriptor.idVendor == Id_Table[i].idVendor) && 207 0x01, USB_TYPE_VENDOR|USB_RECIP_DEVICE|USB_DIR_IN,
224 (udev->descriptor.idProduct == Id_Table[i].idProduct)) { 208 0x0, 0x400, &ltmp, 4, HZ*100 );
225 printk("[w35und]Found supported hardware\n"); 209 if (ret < 0)
226 break; 210 goto error;
227 }
228 }
229 if ((i == (sizeof(Id_Table)/sizeof(struct usb_device_id)))) {
230 #ifdef _PE_USB_INI_DUMP_
231 WBDEBUG(("[w35und] This is not the one we are interested about\n"));
232 #endif
233 return -ENODEV;
234 }
235
236 // 20060630.2 Check the device if it already be opened
237 ret = usb_control_msg(udev, usb_rcvctrlpipe( udev, 0 ),
238 0x01, USB_TYPE_VENDOR|USB_RECIP_DEVICE|USB_DIR_IN,
239 0x0, 0x400, &ltmp, 4, HZ*100 );
240 if( ret < 0 )
241 break;
242 211
243 ltmp = cpu_to_le32(ltmp); 212 ltmp = cpu_to_le32(ltmp);
244 if (ltmp) // Is already initialized? 213 if (ltmp) // Is already initialized?
245 break; 214 goto error;
246 215
216 Adapter = kzalloc(sizeof(ADAPTER), GFP_KERNEL);
247 217
248 Adapter = kzalloc(sizeof(ADAPTER), GFP_KERNEL); 218 my_adapter = Adapter;
219 pWbLinux = &Adapter->WbLinux;
220 pWbUsb = &Adapter->sHwData.WbUsb;
221 pWbUsb->udev = udev;
249 222
250 my_adapter = Adapter; 223 interface = intf->cur_altsetting;
251 pWbLinux = &Adapter->WbLinux; 224 endpoint = &interface->endpoint[0].desc;
252 pWbUsb = &Adapter->sHwData.WbUsb;
253 pWbUsb->udev = udev;
254 225
255 interface = intf->cur_altsetting; 226 if (endpoint[2].wMaxPacketSize == 512) {
256 endpoint = &interface->endpoint[0].desc; 227 printk("[w35und] Working on USB 2.0\n");
257 228 pWbUsb->IsUsb20 = 1;
258 if (endpoint[2].wMaxPacketSize == 512) { 229 }
259 printk("[w35und] Working on USB 2.0\n");
260 pWbUsb->IsUsb20 = 1;
261 }
262
263 if (!WbWLanInitialize(Adapter)) {
264 printk("[w35und]WbWLanInitialize fail\n");
265 break;
266 }
267 230
268 { 231 if (!WbWLanInitialize(Adapter)) {
269 struct wbsoft_priv *priv; 232 printk("[w35und]WbWLanInitialize fail\n");
270 struct ieee80211_hw *dev; 233 goto error;
271 int res; 234 }
272 235
273 dev = ieee80211_alloc_hw(sizeof(*priv), &wbsoft_ops); 236 {
237 struct wbsoft_priv *priv;
238 struct ieee80211_hw *dev;
239 static struct ieee80211_supported_band band;
240 int res;
274 241
275 if (!dev) { 242 dev = ieee80211_alloc_hw(sizeof(*priv), &wbsoft_ops);
276 printk("w35und: ieee80211 alloc failed\n" );
277 BUG();
278 }
279 243
280 my_dev = dev; 244 if (!dev) {
245 printk("w35und: ieee80211 alloc failed\n" );
246 BUG();
247 }
281 248
282 SET_IEEE80211_DEV(dev, &udev->dev); 249 my_dev = dev;
283 {
284 phw_data_t pHwData = &Adapter->sHwData;
285 unsigned char dev_addr[MAX_ADDR_LEN];
286 hal_get_permanent_address(pHwData, dev_addr);
287 SET_IEEE80211_PERM_ADDR(dev, dev_addr);
288 }
289 250
251 SET_IEEE80211_DEV(dev, &udev->dev);
252 {
253 phw_data_t pHwData = &Adapter->sHwData;
254 unsigned char dev_addr[MAX_ADDR_LEN];
255 hal_get_permanent_address(pHwData, dev_addr);
256 SET_IEEE80211_PERM_ADDR(dev, dev_addr);
257 }
290 258
291 dev->extra_tx_headroom = 12; /* FIXME */
292 dev->flags = 0;
293 259
294 dev->channel_change_time = 1000; 260 dev->extra_tx_headroom = 12; /* FIXME */
295// dev->max_rssi = 100; 261 dev->flags = 0;
296 262
297 dev->queues = 1; 263 dev->channel_change_time = 1000;
264// dev->max_rssi = 100;
298 265
299 static struct ieee80211_supported_band band; 266 dev->queues = 1;
300 267
301 band.channels = wbsoft_channels; 268 band.channels = wbsoft_channels;
302 band.n_channels = ARRAY_SIZE(wbsoft_channels); 269 band.n_channels = ARRAY_SIZE(wbsoft_channels);
303 band.bitrates = wbsoft_rates; 270 band.bitrates = wbsoft_rates;
304 band.n_bitrates = ARRAY_SIZE(wbsoft_rates); 271 band.n_bitrates = ARRAY_SIZE(wbsoft_rates);
305 272
306 dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &band; 273 dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &band;
307#if 0 274#if 0
308 wbsoft_modes[0].num_channels = 1; 275 wbsoft_modes[0].num_channels = 1;
309 wbsoft_modes[0].channels = wbsoft_channels; 276 wbsoft_modes[0].channels = wbsoft_channels;
310 wbsoft_modes[0].mode = MODE_IEEE80211B; 277 wbsoft_modes[0].mode = MODE_IEEE80211B;
311 wbsoft_modes[0].num_rates = ARRAY_SIZE(wbsoft_rates); 278 wbsoft_modes[0].num_rates = ARRAY_SIZE(wbsoft_rates);
312 wbsoft_modes[0].rates = wbsoft_rates; 279 wbsoft_modes[0].rates = wbsoft_rates;
313 280
314 res = ieee80211_register_hwmode(dev, &wbsoft_modes[0]); 281 res = ieee80211_register_hwmode(dev, &wbsoft_modes[0]);
315 BUG_ON(res); 282 BUG_ON(res);
316#endif 283#endif
317 284
318 res = ieee80211_register_hw(dev); 285 res = ieee80211_register_hw(dev);
319 BUG_ON(res); 286 BUG_ON(res);
320 } 287 }
321
322 usb_set_intfdata( intf, Adapter );
323
324 printk("[w35und] _probe OK\n");
325 return 0;
326 288
327 } while(FALSE); 289 usb_set_intfdata( intf, Adapter );
328 290
291 printk("[w35und] _probe OK\n");
292 return 0;
293error:
329 return -ENOMEM; 294 return -ENOMEM;
330} 295}
331 296
@@ -401,4 +366,22 @@ void wb35_disconnect(struct usb_interface *intf)
401 366
402} 367}
403 368
369static struct usb_driver wb35_driver = {
370 .name = "w35und",
371 .id_table = wb35_table,
372 .probe = wb35_probe,
373 .disconnect = wb35_disconnect,
374};
404 375
376static int __init wb35_init(void)
377{
378 return usb_register(&wb35_driver);
379}
380
381static void __exit wb35_exit(void)
382{
383 usb_deregister(&wb35_driver);
384}
385
386module_init(wb35_init);
387module_exit(wb35_exit);
diff --git a/drivers/staging/winbond/mds.c b/drivers/staging/winbond/mds.c
index 8ce6389c4135..f1de813f9c76 100644
--- a/drivers/staging/winbond/mds.c
+++ b/drivers/staging/winbond/mds.c
@@ -40,7 +40,7 @@ Mds_Tx(PADAPTER Adapter)
40 PMDS pMds = &Adapter->Mds; 40 PMDS pMds = &Adapter->Mds;
41 DESCRIPTOR TxDes; 41 DESCRIPTOR TxDes;
42 PDESCRIPTOR pTxDes = &TxDes; 42 PDESCRIPTOR pTxDes = &TxDes;
43 PUCHAR XmitBufAddress; 43 u8 *XmitBufAddress;
44 u16 XmitBufSize, PacketSize, stmp, CurrentSize, FragmentThreshold; 44 u16 XmitBufSize, PacketSize, stmp, CurrentSize, FragmentThreshold;
45 u8 FillIndex, TxDesIndex, FragmentCount, FillCount; 45 u8 FillIndex, TxDesIndex, FragmentCount, FillCount;
46 unsigned char BufferFilled = FALSE, MICAdd = 0; 46 unsigned char BufferFilled = FALSE, MICAdd = 0;
@@ -90,7 +90,7 @@ Mds_Tx(PADAPTER Adapter)
90 BufferFilled = TRUE; 90 BufferFilled = TRUE;
91 91
92 /* Leaves first u8 intact */ 92 /* Leaves first u8 intact */
93 memset((PUCHAR)pTxDes + 1, 0, sizeof(DESCRIPTOR) - 1); 93 memset((u8 *)pTxDes + 1, 0, sizeof(DESCRIPTOR) - 1);
94 94
95 TxDesIndex = pMds->TxDesIndex;//Get the current ID 95 TxDesIndex = pMds->TxDesIndex;//Get the current ID
96 pTxDes->Descriptor_ID = TxDesIndex; 96 pTxDes->Descriptor_ID = TxDesIndex;
@@ -229,10 +229,10 @@ Mds_SendComplete(PADAPTER Adapter, PT02_DESCRIPTOR pT02)
229} 229}
230 230
231void 231void
232Mds_HeaderCopy(PADAPTER Adapter, PDESCRIPTOR pDes, PUCHAR TargetBuffer) 232Mds_HeaderCopy(PADAPTER Adapter, PDESCRIPTOR pDes, u8 *TargetBuffer)
233{ 233{
234 PMDS pMds = &Adapter->Mds; 234 PMDS pMds = &Adapter->Mds;
235 PUCHAR src_buffer = pDes->buffer_address[0];//931130.5.g 235 u8 *src_buffer = pDes->buffer_address[0];//931130.5.g
236 PT00_DESCRIPTOR pT00; 236 PT00_DESCRIPTOR pT00;
237 PT01_DESCRIPTOR pT01; 237 PT01_DESCRIPTOR pT01;
238 u16 stmp; 238 u16 stmp;
@@ -276,7 +276,7 @@ Mds_HeaderCopy(PADAPTER Adapter, PDESCRIPTOR pDes, PUCHAR TargetBuffer)
276 // 276 //
277 // Set tx rate 277 // Set tx rate
278 // 278 //
279 stmp = *(PUSHORT)(TargetBuffer+30); // 2n alignment address 279 stmp = *(u16 *)(TargetBuffer+30); // 2n alignment address
280 280
281 //Use basic rate 281 //Use basic rate
282 ctmp1 = ctmpf = CURRENT_TX_RATE_FOR_MNG; 282 ctmp1 = ctmpf = CURRENT_TX_RATE_FOR_MNG;
@@ -326,11 +326,13 @@ Mds_HeaderCopy(PADAPTER Adapter, PDESCRIPTOR pDes, PUCHAR TargetBuffer)
326 326
327// The function return the 4n size of usb pk 327// The function return the 4n size of usb pk
328u16 328u16
329Mds_BodyCopy(PADAPTER Adapter, PDESCRIPTOR pDes, PUCHAR TargetBuffer) 329Mds_BodyCopy(PADAPTER Adapter, PDESCRIPTOR pDes, u8 *TargetBuffer)
330{ 330{
331 PT00_DESCRIPTOR pT00; 331 PT00_DESCRIPTOR pT00;
332 PMDS pMds = &Adapter->Mds; 332 PMDS pMds = &Adapter->Mds;
333 PUCHAR buffer, src_buffer, pctmp; 333 u8 *buffer;
334 u8 *src_buffer;
335 u8 *pctmp;
334 u16 Size = 0; 336 u16 Size = 0;
335 u16 SizeLeft, CopySize, CopyLeft, stmp; 337 u16 SizeLeft, CopySize, CopyLeft, stmp;
336 u8 buf_index, FragmentCount = 0; 338 u8 buf_index, FragmentCount = 0;
@@ -354,7 +356,7 @@ Mds_BodyCopy(PADAPTER Adapter, PDESCRIPTOR pDes, PUCHAR TargetBuffer)
354 SizeLeft -= CopySize; 356 SizeLeft -= CopySize;
355 357
356 // 1 Byte operation 358 // 1 Byte operation
357 pctmp = (PUCHAR)( buffer + 8 + DOT_11_SEQUENCE_OFFSET ); 359 pctmp = (u8 *)( buffer + 8 + DOT_11_SEQUENCE_OFFSET );
358 *pctmp &= 0xf0; 360 *pctmp &= 0xf0;
359 *pctmp |= FragmentCount;//931130.5.m 361 *pctmp |= FragmentCount;//931130.5.m
360 if( !FragmentCount ) 362 if( !FragmentCount )
@@ -379,7 +381,7 @@ Mds_BodyCopy(PADAPTER Adapter, PDESCRIPTOR pDes, PUCHAR TargetBuffer)
379 buf_index++; 381 buf_index++;
380 buf_index %= MAX_DESCRIPTOR_BUFFER_INDEX; 382 buf_index %= MAX_DESCRIPTOR_BUFFER_INDEX;
381 } else { 383 } else {
382 PUCHAR pctmp = pDes->buffer_address[buf_index]; 384 u8 *pctmp = pDes->buffer_address[buf_index];
383 pctmp += CopySize; 385 pctmp += CopySize;
384 pDes->buffer_address[buf_index] = pctmp; 386 pDes->buffer_address[buf_index] = pctmp;
385 pDes->buffer_size[buf_index] -= CopySize; 387 pDes->buffer_size[buf_index] -= CopySize;
@@ -419,7 +421,7 @@ Mds_BodyCopy(PADAPTER Adapter, PDESCRIPTOR pDes, PUCHAR TargetBuffer)
419 421
420 pT00->T00_last_mpdu = 1; 422 pT00->T00_last_mpdu = 1;
421 pT00->T00_IsLastMpdu = 1; 423 pT00->T00_IsLastMpdu = 1;
422 buffer = (PUCHAR)pT00 + 8; // +8 for USB hdr 424 buffer = (u8 *)pT00 + 8; // +8 for USB hdr
423 buffer[1] &= ~0x04; // Clear more frag bit of 802.11 frame control 425 buffer[1] &= ~0x04; // Clear more frag bit of 802.11 frame control
424 pDes->FragmentCount = FragmentCount; // Update the correct fragment number 426 pDes->FragmentCount = FragmentCount; // Update the correct fragment number
425 return Size; 427 return Size;
@@ -427,7 +429,7 @@ Mds_BodyCopy(PADAPTER Adapter, PDESCRIPTOR pDes, PUCHAR TargetBuffer)
427 429
428 430
429void 431void
430Mds_DurationSet( PADAPTER Adapter, PDESCRIPTOR pDes, PUCHAR buffer ) 432Mds_DurationSet( PADAPTER Adapter, PDESCRIPTOR pDes, u8 *buffer )
431{ 433{
432 PT00_DESCRIPTOR pT00; 434 PT00_DESCRIPTOR pT00;
433 PT01_DESCRIPTOR pT01; 435 PT01_DESCRIPTOR pT01;
@@ -435,7 +437,7 @@ Mds_DurationSet( PADAPTER Adapter, PDESCRIPTOR pDes, PUCHAR buffer )
435 u8 Rate, i; 437 u8 Rate, i;
436 unsigned char CTS_on = FALSE, RTS_on = FALSE; 438 unsigned char CTS_on = FALSE, RTS_on = FALSE;
437 PT00_DESCRIPTOR pNextT00; 439 PT00_DESCRIPTOR pNextT00;
438 u16 BodyLen; 440 u16 BodyLen = 0;
439 unsigned char boGroupAddr = FALSE; 441 unsigned char boGroupAddr = FALSE;
440 442
441 443
@@ -574,7 +576,7 @@ Mds_DurationSet( PADAPTER Adapter, PDESCRIPTOR pDes, PUCHAR buffer )
574 DEFAULT_SIFSTIME*3 ); 576 DEFAULT_SIFSTIME*3 );
575 } 577 }
576 578
577 ((PUSHORT)buffer)[5] = cpu_to_le16(Duration);// 4 USHOR for skip 8B USB, 2USHORT=FC + Duration 579 ((u16 *)buffer)[5] = cpu_to_le16(Duration);// 4 USHOR for skip 8B USB, 2USHORT=FC + Duration
578 580
579 //----20061009 add by anson's endian 581 //----20061009 add by anson's endian
580 pNextT00->value = cpu_to_le32(pNextT00->value); 582 pNextT00->value = cpu_to_le32(pNextT00->value);
@@ -615,7 +617,7 @@ Mds_DurationSet( PADAPTER Adapter, PDESCRIPTOR pDes, PUCHAR buffer )
615 } 617 }
616 } 618 }
617 619
618 ((PUSHORT)buffer)[5] = cpu_to_le16(Duration);// 4 USHOR for skip 8B USB, 2USHORT=FC + Duration 620 ((u16 *)buffer)[5] = cpu_to_le16(Duration);// 4 USHOR for skip 8B USB, 2USHORT=FC + Duration
619 pT00->value = cpu_to_le32(pT00->value); 621 pT00->value = cpu_to_le32(pT00->value);
620 pT01->value = cpu_to_le32(pT01->value); 622 pT01->value = cpu_to_le32(pT01->value);
621 //--end 20061009 add 623 //--end 20061009 add
diff --git a/drivers/staging/winbond/mds_f.h b/drivers/staging/winbond/mds_f.h
index 651188be1065..7a682d4cfbdc 100644
--- a/drivers/staging/winbond/mds_f.h
+++ b/drivers/staging/winbond/mds_f.h
@@ -1,9 +1,9 @@
1unsigned char Mds_initial( PADAPTER Adapter ); 1unsigned char Mds_initial( PADAPTER Adapter );
2void Mds_Destroy( PADAPTER Adapter ); 2void Mds_Destroy( PADAPTER Adapter );
3void Mds_Tx( PADAPTER Adapter ); 3void Mds_Tx( PADAPTER Adapter );
4void Mds_HeaderCopy( PADAPTER Adapter, PDESCRIPTOR pDes, PUCHAR TargetBuffer ); 4void Mds_HeaderCopy( PADAPTER Adapter, PDESCRIPTOR pDes, u8 *TargetBuffer );
5u16 Mds_BodyCopy( PADAPTER Adapter, PDESCRIPTOR pDes, PUCHAR TargetBuffer ); 5u16 Mds_BodyCopy( PADAPTER Adapter, PDESCRIPTOR pDes, u8 *TargetBuffer );
6void Mds_DurationSet( PADAPTER Adapter, PDESCRIPTOR pDes, PUCHAR TargetBuffer ); 6void Mds_DurationSet( PADAPTER Adapter, PDESCRIPTOR pDes, u8 *TargetBuffer );
7void Mds_SendComplete( PADAPTER Adapter, PT02_DESCRIPTOR pT02 ); 7void Mds_SendComplete( PADAPTER Adapter, PT02_DESCRIPTOR pT02 );
8void Mds_MpduProcess( PADAPTER Adapter, PDESCRIPTOR pRxDes ); 8void Mds_MpduProcess( PADAPTER Adapter, PDESCRIPTOR pRxDes );
9void Mds_reset_descriptor( PADAPTER Adapter ); 9void Mds_reset_descriptor( PADAPTER Adapter );
diff --git a/drivers/staging/winbond/mds_s.h b/drivers/staging/winbond/mds_s.h
index 4738279d5f39..9df2e0936bf8 100644
--- a/drivers/staging/winbond/mds_s.h
+++ b/drivers/staging/winbond/mds_s.h
@@ -86,7 +86,7 @@ typedef struct _MDS
86{ 86{
87 // For Tx usage 87 // For Tx usage
88 u8 TxOwner[ ((MAX_USB_TX_BUFFER_NUMBER + 3) & ~0x03) ]; 88 u8 TxOwner[ ((MAX_USB_TX_BUFFER_NUMBER + 3) & ~0x03) ];
89 PUCHAR pTxBuffer; 89 u8 *pTxBuffer;
90 u16 TxBufferSize[ ((MAX_USB_TX_BUFFER_NUMBER + 1) & ~0x01) ]; 90 u16 TxBufferSize[ ((MAX_USB_TX_BUFFER_NUMBER + 1) & ~0x01) ];
91 u8 TxDesFrom[ ((MAX_USB_TX_DESCRIPTOR + 3) & ~0x03) ];//931130.4.u // 1: MLME 2: NDIS control 3: NDIS data 91 u8 TxDesFrom[ ((MAX_USB_TX_DESCRIPTOR + 3) & ~0x03) ];//931130.4.u // 1: MLME 2: NDIS control 3: NDIS data
92 u8 TxCountInBuffer[ ((MAX_USB_TX_DESCRIPTOR + 3) & ~0x03) ]; // 20060928 92 u8 TxCountInBuffer[ ((MAX_USB_TX_DESCRIPTOR + 3) & ~0x03) ]; // 20060928
@@ -103,7 +103,7 @@ typedef struct _MDS
103 u16 TxResult[ ((MAX_USB_TX_DESCRIPTOR + 1) & ~0x01) ];//Collect the sending result of Mpdu 103 u16 TxResult[ ((MAX_USB_TX_DESCRIPTOR + 1) & ~0x01) ];//Collect the sending result of Mpdu
104 104
105 u8 MicRedundant[8]; // For tmp use 105 u8 MicRedundant[8]; // For tmp use
106 PUCHAR MicWriteAddress[2]; //The start address to fill the Mic, use 2 point due to Mic maybe fragment 106 u8 *MicWriteAddress[2]; //The start address to fill the Mic, use 2 point due to Mic maybe fragment
107 107
108 u16 MicWriteSize[2]; //931130.4.x 108 u16 MicWriteSize[2]; //931130.4.x
109 109
@@ -144,7 +144,7 @@ typedef struct _MDS
144 144
145typedef struct _RxBuffer 145typedef struct _RxBuffer
146{ 146{
147 PUCHAR pBufferAddress; // Pointer the received data buffer. 147 u8 * pBufferAddress; // Pointer the received data buffer.
148 u16 BufferSize; 148 u16 BufferSize;
149 u8 RESERVED; 149 u8 RESERVED;
150 u8 BufferIndex;// Only 1 byte 150 u8 BufferIndex;// Only 1 byte
@@ -176,7 +176,7 @@ typedef struct _RXLAYER1
176 ///////////////////////////////////////////////////////////////////////////////////////////// 176 /////////////////////////////////////////////////////////////////////////////////////////////
177 // For brand-new Rx system 177 // For brand-new Rx system
178 u8 ReservedBuffer[ 2400 ];//If Buffer ID is reserved one, it must copy the data into this area 178 u8 ReservedBuffer[ 2400 ];//If Buffer ID is reserved one, it must copy the data into this area
179 PUCHAR ReservedBufferPoint;// Point to the next availabe address of reserved buffer 179 u8 *ReservedBufferPoint;// Point to the next availabe address of reserved buffer
180 180
181}RXLAYER1, * PRXLAYER1; 181}RXLAYER1, * PRXLAYER1;
182 182
diff --git a/drivers/staging/winbond/mlme_s.h b/drivers/staging/winbond/mlme_s.h
index 58094f61c032..039fd408ba62 100644
--- a/drivers/staging/winbond/mlme_s.h
+++ b/drivers/staging/winbond/mlme_s.h
@@ -125,12 +125,12 @@
125typedef struct _MLME_FRAME 125typedef struct _MLME_FRAME
126{ 126{
127 //NDIS_PACKET MLME_Packet; 127 //NDIS_PACKET MLME_Packet;
128 PCHAR pMMPDU; 128 s8 * pMMPDU;
129 u16 len; 129 u16 len;
130 u8 DataType; 130 u8 DataType;
131 u8 IsInUsed; 131 u8 IsInUsed;
132 132
133 OS_SPIN_LOCK MLMESpinLock; 133 spinlock_t MLMESpinLock;
134 134
135 u8 TxMMPDU[MAX_NUM_TX_MMPDU][MAX_MMPDU_SIZE]; 135 u8 TxMMPDU[MAX_NUM_TX_MMPDU][MAX_MMPDU_SIZE];
136 u8 TxMMPDUInUse[ (MAX_NUM_TX_MMPDU+3) & ~0x03 ]; 136 u8 TxMMPDUInUse[ (MAX_NUM_TX_MMPDU+3) & ~0x03 ];
diff --git a/drivers/staging/winbond/mlmetxrx.c b/drivers/staging/winbond/mlmetxrx.c
index 46b091e96794..e8533b8d1976 100644
--- a/drivers/staging/winbond/mlmetxrx.c
+++ b/drivers/staging/winbond/mlmetxrx.c
@@ -113,13 +113,13 @@ MLME_GetNextPacket(PADAPTER Adapter, PDESCRIPTOR pDes)
113 pDes->Type = Adapter->sMlmeFrame.DataType; 113 pDes->Type = Adapter->sMlmeFrame.DataType;
114} 114}
115 115
116void MLMEfreeMMPDUBuffer(PWB32_ADAPTER Adapter, PCHAR pData) 116void MLMEfreeMMPDUBuffer(PWB32_ADAPTER Adapter, s8 *pData)
117{ 117{
118 int i; 118 int i;
119 119
120 // Reclaim the data buffer 120 // Reclaim the data buffer
121 for (i = 0; i < MAX_NUM_TX_MMPDU; i++) { 121 for (i = 0; i < MAX_NUM_TX_MMPDU; i++) {
122 if (pData == (PCHAR)&(Adapter->sMlmeFrame.TxMMPDU[i])) 122 if (pData == (s8 *)&(Adapter->sMlmeFrame.TxMMPDU[i]))
123 break; 123 break;
124 } 124 }
125 if (Adapter->sMlmeFrame.TxMMPDUInUse[i]) 125 if (Adapter->sMlmeFrame.TxMMPDUInUse[i])
diff --git a/drivers/staging/winbond/mlmetxrx_f.h b/drivers/staging/winbond/mlmetxrx_f.h
index d74e225be215..24cd5f308d9f 100644
--- a/drivers/staging/winbond/mlmetxrx_f.h
+++ b/drivers/staging/winbond/mlmetxrx_f.h
@@ -20,7 +20,7 @@ MLMEGetMMPDUBuffer(
20 PWB32_ADAPTER Adapter 20 PWB32_ADAPTER Adapter
21 ); 21 );
22 22
23void MLMEfreeMMPDUBuffer( PWB32_ADAPTER Adapter, PCHAR pData); 23void MLMEfreeMMPDUBuffer( PWB32_ADAPTER Adapter, s8 * pData);
24 24
25void MLME_GetNextPacket( PADAPTER Adapter, PDESCRIPTOR pDes ); 25void MLME_GetNextPacket( PADAPTER Adapter, PDESCRIPTOR pDes );
26u8 MLMESendFrame( PWB32_ADAPTER Adapter, 26u8 MLMESendFrame( PWB32_ADAPTER Adapter,
@@ -42,7 +42,7 @@ MLMERcvFrame(
42void 42void
43MLMEReturnPacket( 43MLMEReturnPacket(
44 PWB32_ADAPTER Adapter, 44 PWB32_ADAPTER Adapter,
45 PUCHAR pRxBufer 45 u8 * pRxBufer
46 ); 46 );
47#ifdef _IBSS_BEACON_SEQ_STICK_ 47#ifdef _IBSS_BEACON_SEQ_STICK_
48s8 SendBCNullData(PWB32_ADAPTER Adapter, u16 wIdx); 48s8 SendBCNullData(PWB32_ADAPTER Adapter, u16 wIdx);
diff --git a/drivers/staging/winbond/reg.c b/drivers/staging/winbond/reg.c
index b475c7a7c424..57af5b831509 100644
--- a/drivers/staging/winbond/reg.c
+++ b/drivers/staging/winbond/reg.c
@@ -922,16 +922,16 @@ Uxx_ReadEthernetAddress( phw_data_t pHwData )
922 // Only unplug and plug again can make hardware read EEPROM again. 20060727 922 // Only unplug and plug again can make hardware read EEPROM again. 20060727
923 Wb35Reg_WriteSync( pHwData, 0x03b4, 0x08000000 ); // Start EEPROM access + Read + address(0x0d) 923 Wb35Reg_WriteSync( pHwData, 0x03b4, 0x08000000 ); // Start EEPROM access + Read + address(0x0d)
924 Wb35Reg_ReadSync( pHwData, 0x03b4, &ltmp ); 924 Wb35Reg_ReadSync( pHwData, 0x03b4, &ltmp );
925 *(PUSHORT)pHwData->PermanentMacAddress = cpu_to_le16((u16)ltmp); //20060926 anson's endian 925 *(u16 *)pHwData->PermanentMacAddress = cpu_to_le16((u16)ltmp); //20060926 anson's endian
926 Wb35Reg_WriteSync( pHwData, 0x03b4, 0x08010000 ); // Start EEPROM access + Read + address(0x0d) 926 Wb35Reg_WriteSync( pHwData, 0x03b4, 0x08010000 ); // Start EEPROM access + Read + address(0x0d)
927 Wb35Reg_ReadSync( pHwData, 0x03b4, &ltmp ); 927 Wb35Reg_ReadSync( pHwData, 0x03b4, &ltmp );
928 *(PUSHORT)(pHwData->PermanentMacAddress + 2) = cpu_to_le16((u16)ltmp); //20060926 anson's endian 928 *(u16 *)(pHwData->PermanentMacAddress + 2) = cpu_to_le16((u16)ltmp); //20060926 anson's endian
929 Wb35Reg_WriteSync( pHwData, 0x03b4, 0x08020000 ); // Start EEPROM access + Read + address(0x0d) 929 Wb35Reg_WriteSync( pHwData, 0x03b4, 0x08020000 ); // Start EEPROM access + Read + address(0x0d)
930 Wb35Reg_ReadSync( pHwData, 0x03b4, &ltmp ); 930 Wb35Reg_ReadSync( pHwData, 0x03b4, &ltmp );
931 *(PUSHORT)(pHwData->PermanentMacAddress + 4) = cpu_to_le16((u16)ltmp); //20060926 anson's endian 931 *(u16 *)(pHwData->PermanentMacAddress + 4) = cpu_to_le16((u16)ltmp); //20060926 anson's endian
932 *(PUSHORT)(pHwData->PermanentMacAddress + 6) = 0; 932 *(u16 *)(pHwData->PermanentMacAddress + 6) = 0;
933 Wb35Reg_WriteSync( pHwData, 0x03e8, cpu_to_le32(*(PULONG)pHwData->PermanentMacAddress) ); //20060926 anson's endian 933 Wb35Reg_WriteSync( pHwData, 0x03e8, cpu_to_le32(*(u32 *)pHwData->PermanentMacAddress) ); //20060926 anson's endian
934 Wb35Reg_WriteSync( pHwData, 0x03ec, cpu_to_le32(*(PULONG)(pHwData->PermanentMacAddress+4)) ); //20060926 anson's endian 934 Wb35Reg_WriteSync( pHwData, 0x03ec, cpu_to_le32(*(u32 *)(pHwData->PermanentMacAddress+4)) ); //20060926 anson's endian
935} 935}
936 936
937 937
@@ -1038,7 +1038,7 @@ void
1038RFSynthesizer_initial(phw_data_t pHwData) 1038RFSynthesizer_initial(phw_data_t pHwData)
1039{ 1039{
1040 u32 altmp[32]; 1040 u32 altmp[32];
1041 PULONG pltmp = altmp; 1041 u32 * pltmp = altmp;
1042 u32 ltmp; 1042 u32 ltmp;
1043 u8 number=0x00; // The number of register vale 1043 u8 number=0x00; // The number of register vale
1044 u8 i; 1044 u8 i;
@@ -2358,11 +2358,11 @@ void Mxx_initial( phw_data_t pHwData )
2358 pltmp[2] = pWb35Reg->M2C_MacControl; 2358 pltmp[2] = pWb35Reg->M2C_MacControl;
2359 2359
2360 // M30 BSSID 2360 // M30 BSSID
2361 pltmp[3] = *(PULONG)pHwData->bssid; 2361 pltmp[3] = *(u32 *)pHwData->bssid;
2362 2362
2363 // M34 2363 // M34
2364 pHwData->AID = DEFAULT_AID; 2364 pHwData->AID = DEFAULT_AID;
2365 tmp = *(PUSHORT)(pHwData->bssid+4); 2365 tmp = *(u16 *)(pHwData->bssid+4);
2366 tmp |= DEFAULT_AID << 16; 2366 tmp |= DEFAULT_AID << 16;
2367 pltmp[4] = tmp; 2367 pltmp[4] = tmp;
2368 2368
@@ -2428,7 +2428,7 @@ void GetTxVgaFromEEPROM( phw_data_t pHwData )
2428{ 2428{
2429 u32 i, j, ltmp; 2429 u32 i, j, ltmp;
2430 u16 Value[MAX_TXVGA_EEPROM]; 2430 u16 Value[MAX_TXVGA_EEPROM];
2431 PUCHAR pctmp; 2431 u8 *pctmp;
2432 u8 ctmp=0; 2432 u8 ctmp=0;
2433 2433
2434 // Get the entire TxVga setting in EEPROM 2434 // Get the entire TxVga setting in EEPROM
@@ -2441,7 +2441,7 @@ void GetTxVgaFromEEPROM( phw_data_t pHwData )
2441 } 2441 }
2442 2442
2443 // Adjust the filed which fills with reserved value. 2443 // Adjust the filed which fills with reserved value.
2444 pctmp = (PUCHAR)Value; 2444 pctmp = (u8 *)Value;
2445 for( i=0; i<(MAX_TXVGA_EEPROM*2); i++ ) 2445 for( i=0; i<(MAX_TXVGA_EEPROM*2); i++ )
2446 { 2446 {
2447 if( pctmp[i] != 0xff ) 2447 if( pctmp[i] != 0xff )
@@ -2480,7 +2480,7 @@ void GetTxVgaFromEEPROM( phw_data_t pHwData )
2480// This function will use default TxVgaSettingInEEPROM data to calculate new TxVga. 2480// This function will use default TxVgaSettingInEEPROM data to calculate new TxVga.
2481void EEPROMTxVgaAdjust( phw_data_t pHwData ) // 20060619.5 Add 2481void EEPROMTxVgaAdjust( phw_data_t pHwData ) // 20060619.5 Add
2482{ 2482{
2483 PUCHAR pTxVga = pHwData->TxVgaSettingInEEPROM; 2483 u8 * pTxVga = pHwData->TxVgaSettingInEEPROM;
2484 s16 i, stmp; 2484 s16 i, stmp;
2485 2485
2486 //-- 2.4G -- 20060704.2 Request from Tiger 2486 //-- 2.4G -- 20060704.2 Request from Tiger
diff --git a/drivers/staging/winbond/sme_api.c b/drivers/staging/winbond/sme_api.c
index 40e93b7600eb..31c9673ea865 100644
--- a/drivers/staging/winbond/sme_api.c
+++ b/drivers/staging/winbond/sme_api.c
@@ -10,4 +10,5 @@
10s8 sme_get_rssi(void *pcore_data, s32 *prssi) 10s8 sme_get_rssi(void *pcore_data, s32 *prssi)
11{ 11{
12 BUG(); 12 BUG();
13 return 0;
13} 14}
diff --git a/drivers/staging/winbond/sme_api.h b/drivers/staging/winbond/sme_api.h
index 016b225ca4a4..745eb376bc70 100644
--- a/drivers/staging/winbond/sme_api.h
+++ b/drivers/staging/winbond/sme_api.h
@@ -208,7 +208,7 @@ s8 sme_set_tx_antenna(void *pcore_data, u32 TxAntenna);
208s8 sme_set_IBSS_chan(void *pcore_data, ChanInfo chan); 208s8 sme_set_IBSS_chan(void *pcore_data, ChanInfo chan);
209 209
210//20061108 WPS 210//20061108 WPS
211s8 sme_set_IE_append(void *pcore_data, PUCHAR buffer, u16 buf_len); 211s8 sme_set_IE_append(void *pcore_data, u8 *buffer, u16 buf_len);
212 212
213 213
214 214
diff --git a/drivers/staging/winbond/wbhal.c b/drivers/staging/winbond/wbhal.c
index daf442247558..5d68ecec34c7 100644
--- a/drivers/staging/winbond/wbhal.c
+++ b/drivers/staging/winbond/wbhal.c
@@ -1,13 +1,13 @@
1#include "os_common.h" 1#include "os_common.h"
2 2
3void hal_get_ethernet_address( phw_data_t pHwData, PUCHAR current_address ) 3void hal_get_ethernet_address( phw_data_t pHwData, u8 *current_address )
4{ 4{
5 if( pHwData->SurpriseRemove ) return; 5 if( pHwData->SurpriseRemove ) return;
6 6
7 memcpy( current_address, pHwData->CurrentMacAddress, ETH_LENGTH_OF_ADDRESS ); 7 memcpy( current_address, pHwData->CurrentMacAddress, ETH_LENGTH_OF_ADDRESS );
8} 8}
9 9
10void hal_set_ethernet_address( phw_data_t pHwData, PUCHAR current_address ) 10void hal_set_ethernet_address( phw_data_t pHwData, u8 *current_address )
11{ 11{
12 u32 ltmp[2]; 12 u32 ltmp[2];
13 13
@@ -15,13 +15,13 @@ void hal_set_ethernet_address( phw_data_t pHwData, PUCHAR current_address )
15 15
16 memcpy( pHwData->CurrentMacAddress, current_address, ETH_LENGTH_OF_ADDRESS ); 16 memcpy( pHwData->CurrentMacAddress, current_address, ETH_LENGTH_OF_ADDRESS );
17 17
18 ltmp[0]= cpu_to_le32( *(PULONG)pHwData->CurrentMacAddress ); 18 ltmp[0]= cpu_to_le32( *(u32 *)pHwData->CurrentMacAddress );
19 ltmp[1]= cpu_to_le32( *(PULONG)(pHwData->CurrentMacAddress + 4) ) & 0xffff; 19 ltmp[1]= cpu_to_le32( *(u32 *)(pHwData->CurrentMacAddress + 4) ) & 0xffff;
20 20
21 Wb35Reg_BurstWrite( pHwData, 0x03e8, ltmp, 2, AUTO_INCREMENT ); 21 Wb35Reg_BurstWrite( pHwData, 0x03e8, ltmp, 2, AUTO_INCREMENT );
22} 22}
23 23
24void hal_get_permanent_address( phw_data_t pHwData, PUCHAR pethernet_address ) 24void hal_get_permanent_address( phw_data_t pHwData, u8 *pethernet_address )
25{ 25{
26 if( pHwData->SurpriseRemove ) return; 26 if( pHwData->SurpriseRemove ) return;
27 27
@@ -89,7 +89,7 @@ void hal_halt(phw_data_t pHwData, void *ppa_data)
89} 89}
90 90
91//--------------------------------------------------------------------------------------------------- 91//---------------------------------------------------------------------------------------------------
92void hal_set_rates(phw_data_t pHwData, PUCHAR pbss_rates, 92void hal_set_rates(phw_data_t pHwData, u8 *pbss_rates,
93 u8 length, unsigned char basic_rate_set) 93 u8 length, unsigned char basic_rate_set)
94{ 94{
95 PWB35REG pWb35Reg = &pHwData->Wb35Reg; 95 PWB35REG pWb35Reg = &pHwData->Wb35Reg;
@@ -158,13 +158,13 @@ void hal_set_rates(phw_data_t pHwData, PUCHAR pbss_rates,
158 // Fill data into support rate until buffer full 158 // Fill data into support rate until buffer full
159 //---20060926 add by anson's endian 159 //---20060926 add by anson's endian
160 for (i=0; i<4; i++) 160 for (i=0; i<4; i++)
161 *(PULONG)(SupportedRate+(i<<2)) = cpu_to_le32( *(PULONG)(SupportedRate+(i<<2)) ); 161 *(u32 *)(SupportedRate+(i<<2)) = cpu_to_le32( *(u32 *)(SupportedRate+(i<<2)) );
162 //--- end 20060926 add by anson's endian 162 //--- end 20060926 add by anson's endian
163 Wb35Reg_BurstWrite( pHwData,0x087c, (PULONG)SupportedRate, 4, AUTO_INCREMENT ); 163 Wb35Reg_BurstWrite( pHwData,0x087c, (u32 *)SupportedRate, 4, AUTO_INCREMENT );
164 pWb35Reg->M7C_MacControl = ((PULONG)SupportedRate)[0]; 164 pWb35Reg->M7C_MacControl = ((u32 *)SupportedRate)[0];
165 pWb35Reg->M80_MacControl = ((PULONG)SupportedRate)[1]; 165 pWb35Reg->M80_MacControl = ((u32 *)SupportedRate)[1];
166 pWb35Reg->M84_MacControl = ((PULONG)SupportedRate)[2]; 166 pWb35Reg->M84_MacControl = ((u32 *)SupportedRate)[2];
167 pWb35Reg->M88_MacControl = ((PULONG)SupportedRate)[3]; 167 pWb35Reg->M88_MacControl = ((u32 *)SupportedRate)[3];
168 168
169 // Fill length 169 // Fill length
170 tmp = Count1<<28 | Count2<<24; 170 tmp = Count1<<28 | Count2<<24;
@@ -206,7 +206,7 @@ void hal_set_current_channel_ex( phw_data_t pHwData, ChanInfo channel )
206 pWb35Reg->M28_MacControl &= ~0xff; // Clean channel information field 206 pWb35Reg->M28_MacControl &= ~0xff; // Clean channel information field
207 pWb35Reg->M28_MacControl |= channel.ChanNo; 207 pWb35Reg->M28_MacControl |= channel.ChanNo;
208 Wb35Reg_WriteWithCallbackValue( pHwData, 0x0828, pWb35Reg->M28_MacControl, 208 Wb35Reg_WriteWithCallbackValue( pHwData, 0x0828, pWb35Reg->M28_MacControl,
209 (PCHAR)&channel, sizeof(ChanInfo)); 209 (s8 *)&channel, sizeof(ChanInfo));
210} 210}
211//--------------------------------------------------------------------------------------------------- 211//---------------------------------------------------------------------------------------------------
212void hal_set_current_channel( phw_data_t pHwData, ChanInfo channel ) 212void hal_set_current_channel( phw_data_t pHwData, ChanInfo channel )
@@ -277,7 +277,7 @@ void hal_set_accept_beacon( phw_data_t pHwData, u8 enable )
277 Wb35Reg_Write( pHwData, 0x0800, pWb35Reg->M00_MacControl ); 277 Wb35Reg_Write( pHwData, 0x0800, pWb35Reg->M00_MacControl );
278} 278}
279//--------------------------------------------------------------------------------------------------- 279//---------------------------------------------------------------------------------------------------
280void hal_set_multicast_address( phw_data_t pHwData, PUCHAR address, u8 number ) 280void hal_set_multicast_address( phw_data_t pHwData, u8 *address, u8 number )
281{ 281{
282 PWB35REG pWb35Reg = &pHwData->Wb35Reg; 282 PWB35REG pWb35Reg = &pHwData->Wb35Reg;
283 u8 Byte, Bit; 283 u8 Byte, Bit;
@@ -297,7 +297,7 @@ void hal_set_multicast_address( phw_data_t pHwData, PUCHAR address, u8 number )
297 } 297 }
298 298
299 // Updating register 299 // Updating register
300 Wb35Reg_BurstWrite( pHwData, 0x0804, (PULONG)pWb35Reg->Multicast, 2, AUTO_INCREMENT ); 300 Wb35Reg_BurstWrite( pHwData, 0x0804, (u32 *)pWb35Reg->Multicast, 2, AUTO_INCREMENT );
301} 301}
302//--------------------------------------------------------------------------------------------------- 302//---------------------------------------------------------------------------------------------------
303u8 hal_get_accept_beacon( phw_data_t pHwData ) 303u8 hal_get_accept_beacon( phw_data_t pHwData )
@@ -806,7 +806,7 @@ u8 hal_get_hw_radio_off( phw_data_t pHwData )
806 } 806 }
807} 807}
808 808
809unsigned char hal_get_dxx_reg( phw_data_t pHwData, u16 number, PULONG pValue ) 809unsigned char hal_get_dxx_reg( phw_data_t pHwData, u16 number, u32 * pValue )
810{ 810{
811 if( number < 0x1000 ) 811 if( number < 0x1000 )
812 number += 0x1000; 812 number += 0x1000;
diff --git a/drivers/staging/winbond/wbhal_f.h b/drivers/staging/winbond/wbhal_f.h
index fe25f97af724..ea9531ac8474 100644
--- a/drivers/staging/winbond/wbhal_f.h
+++ b/drivers/staging/winbond/wbhal_f.h
@@ -16,23 +16,23 @@
16//==================================================================================== 16//====================================================================================
17// Function declaration 17// Function declaration
18//==================================================================================== 18//====================================================================================
19void hal_remove_mapping_key( phw_data_t pHwData, PUCHAR pmac_addr ); 19void hal_remove_mapping_key( phw_data_t pHwData, u8 *pmac_addr );
20void hal_remove_default_key( phw_data_t pHwData, u32 index ); 20void hal_remove_default_key( phw_data_t pHwData, u32 index );
21unsigned char hal_set_mapping_key( phw_data_t Adapter, PUCHAR pmac_addr, u8 null_key, u8 wep_on, PUCHAR ptx_tsc, PUCHAR prx_tsc, u8 key_type, u8 key_len, PUCHAR pkey_data ); 21unsigned char hal_set_mapping_key( phw_data_t Adapter, u8 *pmac_addr, u8 null_key, u8 wep_on, u8 *ptx_tsc, u8 *prx_tsc, u8 key_type, u8 key_len, u8 *pkey_data );
22unsigned char hal_set_default_key( phw_data_t Adapter, u8 index, u8 null_key, u8 wep_on, PUCHAR ptx_tsc, PUCHAR prx_tsc, u8 key_type, u8 key_len, PUCHAR pkey_data ); 22unsigned char hal_set_default_key( phw_data_t Adapter, u8 index, u8 null_key, u8 wep_on, u8 *ptx_tsc, u8 *prx_tsc, u8 key_type, u8 key_len, u8 *pkey_data );
23void hal_clear_all_default_key( phw_data_t pHwData ); 23void hal_clear_all_default_key( phw_data_t pHwData );
24void hal_clear_all_group_key( phw_data_t pHwData ); 24void hal_clear_all_group_key( phw_data_t pHwData );
25void hal_clear_all_mapping_key( phw_data_t pHwData ); 25void hal_clear_all_mapping_key( phw_data_t pHwData );
26void hal_clear_all_key( phw_data_t pHwData ); 26void hal_clear_all_key( phw_data_t pHwData );
27void hal_get_ethernet_address( phw_data_t pHwData, PUCHAR current_address ); 27void hal_get_ethernet_address( phw_data_t pHwData, u8 *current_address );
28void hal_set_ethernet_address( phw_data_t pHwData, PUCHAR current_address ); 28void hal_set_ethernet_address( phw_data_t pHwData, u8 *current_address );
29void hal_get_permanent_address( phw_data_t pHwData, PUCHAR pethernet_address ); 29void hal_get_permanent_address( phw_data_t pHwData, u8 *pethernet_address );
30unsigned char hal_init_hardware( phw_data_t pHwData, PADAPTER Adapter ); 30unsigned char hal_init_hardware( phw_data_t pHwData, PADAPTER Adapter );
31void hal_set_power_save_mode( phw_data_t pHwData, unsigned char power_save, unsigned char wakeup, unsigned char dtim ); 31void hal_set_power_save_mode( phw_data_t pHwData, unsigned char power_save, unsigned char wakeup, unsigned char dtim );
32void hal_get_power_save_mode( phw_data_t pHwData, PBOOLEAN pin_pwr_save ); 32void hal_get_power_save_mode( phw_data_t pHwData, u8 *pin_pwr_save );
33void hal_set_slot_time( phw_data_t pHwData, u8 type ); 33void hal_set_slot_time( phw_data_t pHwData, u8 type );
34#define hal_set_atim_window( _A, _ATM ) 34#define hal_set_atim_window( _A, _ATM )
35void hal_set_rates( phw_data_t pHwData, PUCHAR pbss_rates, u8 length, unsigned char basic_rate_set ); 35void hal_set_rates( phw_data_t pHwData, u8 *pbss_rates, u8 length, unsigned char basic_rate_set );
36#define hal_set_basic_rates( _A, _R, _L ) hal_set_rates( _A, _R, _L, TRUE ) 36#define hal_set_basic_rates( _A, _R, _L ) hal_set_rates( _A, _R, _L, TRUE )
37#define hal_set_op_rates( _A, _R, _L ) hal_set_rates( _A, _R, _L, FALSE ) 37#define hal_set_op_rates( _A, _R, _L ) hal_set_rates( _A, _R, _L, FALSE )
38void hal_start_bss( phw_data_t pHwData, u8 mac_op_mode ); 38void hal_start_bss( phw_data_t pHwData, u8 mac_op_mode );
@@ -40,19 +40,19 @@ void hal_join_request( phw_data_t pHwData, u8 bss_type ); // 0:BSS STA 1:IBSS
40void hal_stop_sync_bss( phw_data_t pHwData ); 40void hal_stop_sync_bss( phw_data_t pHwData );
41void hal_resume_sync_bss( phw_data_t pHwData); 41void hal_resume_sync_bss( phw_data_t pHwData);
42void hal_set_aid( phw_data_t pHwData, u16 aid ); 42void hal_set_aid( phw_data_t pHwData, u16 aid );
43void hal_set_bssid( phw_data_t pHwData, PUCHAR pbssid ); 43void hal_set_bssid( phw_data_t pHwData, u8 *pbssid );
44void hal_get_bssid( phw_data_t pHwData, PUCHAR pbssid ); 44void hal_get_bssid( phw_data_t pHwData, u8 *pbssid );
45void hal_set_beacon_period( phw_data_t pHwData, u16 beacon_period ); 45void hal_set_beacon_period( phw_data_t pHwData, u16 beacon_period );
46void hal_set_listen_interval( phw_data_t pHwData, u16 listen_interval ); 46void hal_set_listen_interval( phw_data_t pHwData, u16 listen_interval );
47void hal_set_cap_info( phw_data_t pHwData, u16 capability_info ); 47void hal_set_cap_info( phw_data_t pHwData, u16 capability_info );
48void hal_set_ssid( phw_data_t pHwData, PUCHAR pssid, u8 ssid_len ); 48void hal_set_ssid( phw_data_t pHwData, u8 *pssid, u8 ssid_len );
49void hal_set_current_channel( phw_data_t pHwData, ChanInfo channel ); 49void hal_set_current_channel( phw_data_t pHwData, ChanInfo channel );
50void hal_set_current_channel_ex( phw_data_t pHwData, ChanInfo channel ); 50void hal_set_current_channel_ex( phw_data_t pHwData, ChanInfo channel );
51void hal_get_current_channel( phw_data_t pHwData, ChanInfo *channel ); 51void hal_get_current_channel( phw_data_t pHwData, ChanInfo *channel );
52void hal_set_accept_broadcast( phw_data_t pHwData, u8 enable ); 52void hal_set_accept_broadcast( phw_data_t pHwData, u8 enable );
53void hal_set_accept_multicast( phw_data_t pHwData, u8 enable ); 53void hal_set_accept_multicast( phw_data_t pHwData, u8 enable );
54void hal_set_accept_beacon( phw_data_t pHwData, u8 enable ); 54void hal_set_accept_beacon( phw_data_t pHwData, u8 enable );
55void hal_set_multicast_address( phw_data_t pHwData, PUCHAR address, u8 number ); 55void hal_set_multicast_address( phw_data_t pHwData, u8 *address, u8 number );
56u8 hal_get_accept_beacon( phw_data_t pHwData ); 56u8 hal_get_accept_beacon( phw_data_t pHwData );
57void hal_stop( phw_data_t pHwData ); 57void hal_stop( phw_data_t pHwData );
58void hal_halt( phw_data_t pHwData, void *ppa_data ); 58void hal_halt( phw_data_t pHwData, void *ppa_data );
@@ -97,7 +97,7 @@ void hal_surprise_remove( phw_data_t pHwData );
97 97
98 98
99void hal_rate_change( phw_data_t pHwData ); // Notify the HAL rate is changing 20060613.1 99void hal_rate_change( phw_data_t pHwData ); // Notify the HAL rate is changing 20060613.1
100unsigned char hal_get_dxx_reg( phw_data_t pHwData, u16 number, PULONG pValue ); 100unsigned char hal_get_dxx_reg( phw_data_t pHwData, u16 number, u32 * pValue );
101unsigned char hal_set_dxx_reg( phw_data_t pHwData, u16 number, u32 value ); 101unsigned char hal_set_dxx_reg( phw_data_t pHwData, u16 number, u32 value );
102#define hal_get_time_count( _P ) (_P->time_count/10) // return 100ms count 102#define hal_get_time_count( _P ) (_P->time_count/10) // return 100ms count
103#define hal_detect_error( _P ) (_P->WbUsb.DetectCount) 103#define hal_detect_error( _P ) (_P->WbUsb.DetectCount)
@@ -116,7 +116,7 @@ unsigned char hal_idle( phw_data_t pHwData );
116#define pa_stall_execution( _A ) //OS_SLEEP( 1 ) 116#define pa_stall_execution( _A ) //OS_SLEEP( 1 )
117#define hw_get_cxx_reg( _A, _B, _C ) 117#define hw_get_cxx_reg( _A, _B, _C )
118#define hw_set_cxx_reg( _A, _B, _C ) 118#define hw_set_cxx_reg( _A, _B, _C )
119#define hw_get_dxx_reg( _A, _B, _C ) hal_get_dxx_reg( _A, _B, (PULONG)_C ) 119#define hw_get_dxx_reg( _A, _B, _C ) hal_get_dxx_reg( _A, _B, (u32 *)_C )
120#define hw_set_dxx_reg( _A, _B, _C ) hal_set_dxx_reg( _A, _B, (u32)_C ) 120#define hw_set_dxx_reg( _A, _B, _C ) hal_set_dxx_reg( _A, _B, (u32)_C )
121 121
122 122
diff --git a/drivers/staging/winbond/wbhal_s.h b/drivers/staging/winbond/wbhal_s.h
index 5b862ff357bd..2ee3f0fc1ad8 100644
--- a/drivers/staging/winbond/wbhal_s.h
+++ b/drivers/staging/winbond/wbhal_s.h
@@ -461,7 +461,7 @@ typedef struct _HW_DATA_T
461 //===================================================================== 461 //=====================================================================
462 // Definition for 802.11 462 // Definition for 802.11
463 //===================================================================== 463 //=====================================================================
464 PUCHAR bssid_pointer; // Used by hal_get_bssid for return value 464 u8 *bssid_pointer; // Used by hal_get_bssid for return value
465 u8 bssid[8];// Only 6 byte will be used. 8 byte is required for read buffer 465 u8 bssid[8];// Only 6 byte will be used. 8 byte is required for read buffer
466 u8 ssid[32];// maximum ssid length is 32 byte 466 u8 ssid[32];// maximum ssid length is 32 byte
467 467
@@ -486,7 +486,7 @@ typedef struct _HW_DATA_T
486 u32 CurrentRadioSw; // 20060320.2 0:On 1:Off 486 u32 CurrentRadioSw; // 20060320.2 0:On 1:Off
487 u32 CurrentRadioHw; // 20060825 0:On 1:Off 487 u32 CurrentRadioHw; // 20060825 0:On 1:Off
488 488
489 PUCHAR power_save_point; // Used by hal_get_power_save_mode for return value 489 u8 *power_save_point; // Used by hal_get_power_save_mode for return value
490 u8 cwmin; 490 u8 cwmin;
491 u8 desired_power_save; 491 u8 desired_power_save;
492 u8 dtim;// Is running dtim 492 u8 dtim;// Is running dtim
diff --git a/drivers/staging/winbond/wblinux.c b/drivers/staging/winbond/wblinux.c
index 2eade5a47b19..4ed45e488318 100644
--- a/drivers/staging/winbond/wblinux.c
+++ b/drivers/staging/winbond/wblinux.c
@@ -25,11 +25,11 @@ EncapAtomicInc(PADAPTER Adapter, void* pAtomic)
25{ 25{
26 PWBLINUX pWbLinux = &Adapter->WbLinux; 26 PWBLINUX pWbLinux = &Adapter->WbLinux;
27 u32 ltmp; 27 u32 ltmp;
28 PULONG pltmp = (PULONG)pAtomic; 28 u32 * pltmp = (u32 *)pAtomic;
29 OS_SPIN_LOCK_ACQUIRED( &pWbLinux->AtomicSpinLock ); 29 spin_lock_irq( &pWbLinux->AtomicSpinLock );
30 (*pltmp)++; 30 (*pltmp)++;
31 ltmp = (*pltmp); 31 ltmp = (*pltmp);
32 OS_SPIN_LOCK_RELEASED( &pWbLinux->AtomicSpinLock ); 32 spin_unlock_irq( &pWbLinux->AtomicSpinLock );
33 return ltmp; 33 return ltmp;
34} 34}
35 35
@@ -38,11 +38,11 @@ EncapAtomicDec(PADAPTER Adapter, void* pAtomic)
38{ 38{
39 PWBLINUX pWbLinux = &Adapter->WbLinux; 39 PWBLINUX pWbLinux = &Adapter->WbLinux;
40 u32 ltmp; 40 u32 ltmp;
41 PULONG pltmp = (PULONG)pAtomic; 41 u32 * pltmp = (u32 *)pAtomic;
42 OS_SPIN_LOCK_ACQUIRED( &pWbLinux->AtomicSpinLock ); 42 spin_lock_irq( &pWbLinux->AtomicSpinLock );
43 (*pltmp)--; 43 (*pltmp)--;
44 ltmp = (*pltmp); 44 ltmp = (*pltmp);
45 OS_SPIN_LOCK_RELEASED( &pWbLinux->AtomicSpinLock ); 45 spin_unlock_irq( &pWbLinux->AtomicSpinLock );
46 return ltmp; 46 return ltmp;
47} 47}
48 48
@@ -51,8 +51,8 @@ WBLINUX_Initial(PADAPTER Adapter)
51{ 51{
52 PWBLINUX pWbLinux = &Adapter->WbLinux; 52 PWBLINUX pWbLinux = &Adapter->WbLinux;
53 53
54 OS_SPIN_LOCK_ALLOCATE( &pWbLinux->SpinLock ); 54 spin_lock_init( &pWbLinux->SpinLock );
55 OS_SPIN_LOCK_ALLOCATE( &pWbLinux->AtomicSpinLock ); 55 spin_lock_init( &pWbLinux->AtomicSpinLock );
56 return TRUE; 56 return TRUE;
57} 57}
58 58
@@ -79,7 +79,6 @@ void
79WBLINUX_Destroy(PADAPTER Adapter) 79WBLINUX_Destroy(PADAPTER Adapter)
80{ 80{
81 WBLINUX_stop( Adapter ); 81 WBLINUX_stop( Adapter );
82 OS_SPIN_LOCK_FREE( &pWbNdis->SpinLock );
83#ifdef _PE_USB_INI_DUMP_ 82#ifdef _PE_USB_INI_DUMP_
84 WBDEBUG(("[w35und] unregister_netdev!\n")); 83 WBDEBUG(("[w35und] unregister_netdev!\n"));
85#endif 84#endif
@@ -142,119 +141,118 @@ unsigned char
142WbWLanInitialize(PADAPTER Adapter) 141WbWLanInitialize(PADAPTER Adapter)
143{ 142{
144 phw_data_t pHwData; 143 phw_data_t pHwData;
145 PUCHAR pMacAddr, pMacAddr2; 144 u8 *pMacAddr;
145 u8 *pMacAddr2;
146 u32 InitStep = 0; 146 u32 InitStep = 0;
147 u8 EEPROM_region; 147 u8 EEPROM_region;
148 u8 HwRadioOff; 148 u8 HwRadioOff;
149 149
150 do { 150 //
151 // 151 // Setting default value for Linux
152 // Setting default value for Linux 152 //
153 // 153 Adapter->sLocalPara.region_INF = REGION_AUTO;
154 Adapter->sLocalPara.region_INF = REGION_AUTO; 154 Adapter->sLocalPara.TxRateMode = RATE_AUTO;
155 Adapter->sLocalPara.TxRateMode = RATE_AUTO; 155 psLOCAL->bMacOperationMode = MODE_802_11_BG; // B/G mode
156 psLOCAL->bMacOperationMode = MODE_802_11_BG; // B/G mode 156 Adapter->Mds.TxRTSThreshold = DEFAULT_RTSThreshold;
157 Adapter->Mds.TxRTSThreshold = DEFAULT_RTSThreshold; 157 Adapter->Mds.TxFragmentThreshold = DEFAULT_FRAGMENT_THRESHOLD;
158 Adapter->Mds.TxFragmentThreshold = DEFAULT_FRAGMENT_THRESHOLD; 158 hal_set_phy_type( &Adapter->sHwData, RF_WB_242_1 );
159 hal_set_phy_type( &Adapter->sHwData, RF_WB_242_1 ); 159 Adapter->sLocalPara.MTUsize = MAX_ETHERNET_PACKET_SIZE;
160 Adapter->sLocalPara.MTUsize = MAX_ETHERNET_PACKET_SIZE; 160 psLOCAL->bPreambleMode = AUTO_MODE;
161 psLOCAL->bPreambleMode = AUTO_MODE; 161 Adapter->sLocalPara.RadioOffStatus.boSwRadioOff = FALSE;
162 Adapter->sLocalPara.RadioOffStatus.boSwRadioOff = FALSE; 162 pHwData = &Adapter->sHwData;
163 pHwData = &Adapter->sHwData; 163 hal_set_phy_type( pHwData, RF_DECIDE_BY_INF );
164 hal_set_phy_type( pHwData, RF_DECIDE_BY_INF ); 164
165 165 //
166 // 166 // Initial each module and variable
167 // Initial each module and variable 167 //
168 // 168 if (!WBLINUX_Initial(Adapter)) {
169 if (!WBLINUX_Initial(Adapter)) {
170#ifdef _PE_USB_INI_DUMP_ 169#ifdef _PE_USB_INI_DUMP_
171 WBDEBUG(("[w35und]WBNDIS initialization failed\n")); 170 WBDEBUG(("[w35und]WBNDIS initialization failed\n"));
172#endif 171#endif
173 break; 172 goto error;
174 } 173 }
175 174
176 // Initial Software variable 175 // Initial Software variable
177 Adapter->sLocalPara.ShutDowned = FALSE; 176 Adapter->sLocalPara.ShutDowned = FALSE;
178 177
179 //added by ws for wep key error detection 178 //added by ws for wep key error detection
180 Adapter->sLocalPara.bWepKeyError= FALSE; 179 Adapter->sLocalPara.bWepKeyError= FALSE;
181 Adapter->sLocalPara.bToSelfPacketReceived = FALSE; 180 Adapter->sLocalPara.bToSelfPacketReceived = FALSE;
182 Adapter->sLocalPara.WepKeyDetectTimerCount= 2 * 100; /// 2 seconds 181 Adapter->sLocalPara.WepKeyDetectTimerCount= 2 * 100; /// 2 seconds
183 182
184 // Initial USB hal 183 // Initial USB hal
185 InitStep = 1; 184 InitStep = 1;
186 pHwData = &Adapter->sHwData; 185 pHwData = &Adapter->sHwData;
187 if (!hal_init_hardware(pHwData, Adapter)) 186 if (!hal_init_hardware(pHwData, Adapter))
188 break; 187 goto error;
189 188
190 EEPROM_region = hal_get_region_from_EEPROM( pHwData ); 189 EEPROM_region = hal_get_region_from_EEPROM( pHwData );
191 if (EEPROM_region != REGION_AUTO) 190 if (EEPROM_region != REGION_AUTO)
192 psLOCAL->region = EEPROM_region; 191 psLOCAL->region = EEPROM_region;
193 else { 192 else {
194 if (psLOCAL->region_INF != REGION_AUTO) 193 if (psLOCAL->region_INF != REGION_AUTO)
195 psLOCAL->region = psLOCAL->region_INF; 194 psLOCAL->region = psLOCAL->region_INF;
196 else 195 else
197 psLOCAL->region = REGION_USA; //default setting 196 psLOCAL->region = REGION_USA; //default setting
198 } 197 }
199 198
200 // Get Software setting flag from hal 199 // Get Software setting flag from hal
201 Adapter->sLocalPara.boAntennaDiversity = FALSE; 200 Adapter->sLocalPara.boAntennaDiversity = FALSE;
202 if (hal_software_set(pHwData) & 0x00000001) 201 if (hal_software_set(pHwData) & 0x00000001)
203 Adapter->sLocalPara.boAntennaDiversity = TRUE; 202 Adapter->sLocalPara.boAntennaDiversity = TRUE;
204 203
205 // 204 //
206 // For TS module 205 // For TS module
207 // 206 //
208 InitStep = 2; 207 InitStep = 2;
209 208
210 // For MDS module 209 // For MDS module
211 InitStep = 3; 210 InitStep = 3;
212 Mds_initial(Adapter); 211 Mds_initial(Adapter);
213 212
214 //======================================= 213 //=======================================
215 // Initialize the SME, SCAN, MLME, ROAM 214 // Initialize the SME, SCAN, MLME, ROAM
216 //======================================= 215 //=======================================
217 InitStep = 4; 216 InitStep = 4;
218 InitStep = 5; 217 InitStep = 5;
219 InitStep = 6; 218 InitStep = 6;
220 219
221 // If no user-defined address in the registry, use the addresss "burned" on the NIC instead. 220 // If no user-defined address in the registry, use the addresss "burned" on the NIC instead.
222 pMacAddr = Adapter->sLocalPara.ThisMacAddress; 221 pMacAddr = Adapter->sLocalPara.ThisMacAddress;
223 pMacAddr2 = Adapter->sLocalPara.PermanentAddress; 222 pMacAddr2 = Adapter->sLocalPara.PermanentAddress;
224 hal_get_permanent_address( pHwData, Adapter->sLocalPara.PermanentAddress );// Reading ethernet address from EEPROM 223 hal_get_permanent_address( pHwData, Adapter->sLocalPara.PermanentAddress );// Reading ethernet address from EEPROM
225 if (OS_MEMORY_COMPARE(pMacAddr, "\x00\x00\x00\x00\x00\x00", MAC_ADDR_LENGTH )) // Is equal 224 if (OS_MEMORY_COMPARE(pMacAddr, "\x00\x00\x00\x00\x00\x00", MAC_ADDR_LENGTH )) // Is equal
226 { 225 {
227 memcpy( pMacAddr, pMacAddr2, MAC_ADDR_LENGTH ); 226 memcpy( pMacAddr, pMacAddr2, MAC_ADDR_LENGTH );
228 } else { 227 } else {
229 // Set the user define MAC address 228 // Set the user define MAC address
230 hal_set_ethernet_address( pHwData, Adapter->sLocalPara.ThisMacAddress ); 229 hal_set_ethernet_address( pHwData, Adapter->sLocalPara.ThisMacAddress );
231 } 230 }
232 231
233 //get current antenna 232 //get current antenna
234 psLOCAL->bAntennaNo = hal_get_antenna_number(pHwData); 233 psLOCAL->bAntennaNo = hal_get_antenna_number(pHwData);
235#ifdef _PE_STATE_DUMP_ 234#ifdef _PE_STATE_DUMP_
236 WBDEBUG(("Driver init, antenna no = %d\n", psLOCAL->bAntennaNo)); 235 WBDEBUG(("Driver init, antenna no = %d\n", psLOCAL->bAntennaNo));
237#endif 236#endif
238 hal_get_hw_radio_off( pHwData ); 237 hal_get_hw_radio_off( pHwData );
239 238
240 // Waiting for HAL setting OK 239 // Waiting for HAL setting OK
241 while (!hal_idle(pHwData)) 240 while (!hal_idle(pHwData))
242 OS_SLEEP(10000); 241 OS_SLEEP(10000);
243 242
244 MTO_Init(Adapter); 243 MTO_Init(Adapter);
245 244
246 HwRadioOff = hal_get_hw_radio_off( pHwData ); 245 HwRadioOff = hal_get_hw_radio_off( pHwData );
247 psLOCAL->RadioOffStatus.boHwRadioOff = !!HwRadioOff; 246 psLOCAL->RadioOffStatus.boHwRadioOff = !!HwRadioOff;
248 247
249 hal_set_radio_mode( pHwData, (unsigned char)(psLOCAL->RadioOffStatus.boSwRadioOff || psLOCAL->RadioOffStatus.boHwRadioOff) ); 248 hal_set_radio_mode( pHwData, (unsigned char)(psLOCAL->RadioOffStatus.boSwRadioOff || psLOCAL->RadioOffStatus.boHwRadioOff) );
250 249
251 hal_driver_init_OK(pHwData) = 1; // Notify hal that the driver is ready now. 250 hal_driver_init_OK(pHwData) = 1; // Notify hal that the driver is ready now.
252 //set a tx power for reference..... 251 //set a tx power for reference.....
253// sme_set_tx_power_level(Adapter, 12); FIXME? 252// sme_set_tx_power_level(Adapter, 12); FIXME?
254 return TRUE; 253 return TRUE;
255 }
256 while(FALSE);
257 254
255error:
258 switch (InitStep) { 256 switch (InitStep) {
259 case 5: 257 case 5:
260 case 4: 258 case 4:
diff --git a/drivers/staging/winbond/wblinux_s.h b/drivers/staging/winbond/wblinux_s.h
index 97e9167ab839..fd2bb43bf3cf 100644
--- a/drivers/staging/winbond/wblinux_s.h
+++ b/drivers/staging/winbond/wblinux_s.h
@@ -24,8 +24,8 @@
24 24
25typedef struct _WBLINUX 25typedef struct _WBLINUX
26{ 26{
27 OS_SPIN_LOCK AtomicSpinLock; 27 spinlock_t AtomicSpinLock;
28 OS_SPIN_LOCK SpinLock; 28 spinlock_t SpinLock;
29 u32 shutdown; 29 u32 shutdown;
30 30
31 OS_ATOMIC ThreadCount; 31 OS_ATOMIC ThreadCount;
diff --git a/drivers/staging/wlan-ng/Kconfig b/drivers/staging/wlan-ng/Kconfig
index 10b1f0f634d3..2425d860dcaf 100644
--- a/drivers/staging/wlan-ng/Kconfig
+++ b/drivers/staging/wlan-ng/Kconfig
@@ -1,6 +1,6 @@
1config PRISM2_USB 1config PRISM2_USB
2 tristate "Prism2.5 USB driver" 2 tristate "Prism2.5 USB driver"
3 depends on USB 3 depends on WLAN_80211 && USB
4 default n 4 default n
5 ---help--- 5 ---help---
6 This is the wlan-ng prism 2.5 USB driver for a wide range of 6 This is the wlan-ng prism 2.5 USB driver for a wide range of
diff --git a/drivers/staging/wlan-ng/hfa384x.h b/drivers/staging/wlan-ng/hfa384x.h
index a2054639d24b..0dfb8ce9aae7 100644
--- a/drivers/staging/wlan-ng/hfa384x.h
+++ b/drivers/staging/wlan-ng/hfa384x.h
@@ -824,7 +824,7 @@ PD Record codes
824#define HFA384x_CMD_MACPORT_SET(value) ((UINT16)HFA384x_CMD_AINFO_SET(value)) 824#define HFA384x_CMD_MACPORT_SET(value) ((UINT16)HFA384x_CMD_AINFO_SET(value))
825#define HFA384x_CMD_ISRECL(value) ((UINT16)(HFA384x_CMD_AINFO_GET((UINT16)(value) & HFA384x_CMD_RECL))) 825#define HFA384x_CMD_ISRECL(value) ((UINT16)(HFA384x_CMD_AINFO_GET((UINT16)(value) & HFA384x_CMD_RECL)))
826#define HFA384x_CMD_RECL_SET(value) ((UINT16)HFA384x_CMD_AINFO_SET(value)) 826#define HFA384x_CMD_RECL_SET(value) ((UINT16)HFA384x_CMD_AINFO_SET(value))
827#define HFA384x_CMD_QOS_GET(value) ((UINT16((((UINT16)(value))&((UINT16)0x3000)) >> 12)) 827#define HFA384x_CMD_QOS_GET(value) ((UINT16)((((UINT16)(value))&((UINT16)0x3000)) >> 12))
828#define HFA384x_CMD_QOS_SET(value) ((UINT16)((((UINT16)(value)) << 12) & 0x3000)) 828#define HFA384x_CMD_QOS_SET(value) ((UINT16)((((UINT16)(value)) << 12) & 0x3000))
829#define HFA384x_CMD_ISWRITE(value) ((UINT16)(HFA384x_CMD_AINFO_GET((UINT16)(value) & HFA384x_CMD_WRITE))) 829#define HFA384x_CMD_ISWRITE(value) ((UINT16)(HFA384x_CMD_AINFO_GET((UINT16)(value) & HFA384x_CMD_WRITE)))
830#define HFA384x_CMD_WRITE_SET(value) ((UINT16)HFA384x_CMD_AINFO_SET((UINT16)value)) 830#define HFA384x_CMD_WRITE_SET(value) ((UINT16)HFA384x_CMD_AINFO_SET((UINT16)value))
diff --git a/drivers/staging/wlan-ng/p80211wep.c b/drivers/staging/wlan-ng/p80211wep.c
index 53fe2985971f..11a50c7fbfc8 100644
--- a/drivers/staging/wlan-ng/p80211wep.c
+++ b/drivers/staging/wlan-ng/p80211wep.c
@@ -64,7 +64,6 @@
64/*================================================================*/ 64/*================================================================*/
65/* Project Includes */ 65/* Project Includes */
66 66
67#include "version.h"
68#include "p80211hdr.h" 67#include "p80211hdr.h"
69#include "p80211types.h" 68#include "p80211types.h"
70#include "p80211msg.h" 69#include "p80211msg.h"
diff --git a/drivers/staging/wlan-ng/prism2mib.c b/drivers/staging/wlan-ng/prism2mib.c
index 268fd9bba1ef..eac06f793d81 100644
--- a/drivers/staging/wlan-ng/prism2mib.c
+++ b/drivers/staging/wlan-ng/prism2mib.c
@@ -90,8 +90,6 @@
90#include <linux/usb.h> 90#include <linux/usb.h>
91//#endif 91//#endif
92 92
93#include "wlan_compat.h"
94
95/*================================================================*/ 93/*================================================================*/
96/* Project Includes */ 94/* Project Includes */
97 95
diff --git a/drivers/staging/wlan-ng/wlan_compat.h b/drivers/staging/wlan-ng/wlan_compat.h
index 17026570708f..59dfa8f84cbe 100644
--- a/drivers/staging/wlan-ng/wlan_compat.h
+++ b/drivers/staging/wlan-ng/wlan_compat.h
@@ -245,11 +245,11 @@ typedef int64_t INT64;
245# define preempt_count() (0UL) 245# define preempt_count() (0UL)
246#endif 246#endif
247 247
248#define WLAN_LOG_ERROR(x,args...) printk(KERN_ERR "%s: " x , __FUNCTION__ , ##args); 248#define WLAN_LOG_ERROR(x,args...) printk(KERN_ERR "%s: " x , __func__ , ##args);
249 249
250#define WLAN_LOG_WARNING(x,args...) printk(KERN_WARNING "%s: " x , __FUNCTION__ , ##args); 250#define WLAN_LOG_WARNING(x,args...) printk(KERN_WARNING "%s: " x , __func__ , ##args);
251 251
252#define WLAN_LOG_NOTICE(x,args...) printk(KERN_NOTICE "%s: " x , __FUNCTION__ , ##args); 252#define WLAN_LOG_NOTICE(x,args...) printk(KERN_NOTICE "%s: " x , __func__ , ##args);
253 253
254#define WLAN_LOG_INFO(args... ) printk(KERN_INFO args) 254#define WLAN_LOG_INFO(args... ) printk(KERN_INFO args)
255 255
@@ -265,7 +265,7 @@ typedef int64_t INT64;
265 #define DBFENTER { if ( WLAN_DBVAR >= 5 ){ WLAN_LOG_DEBUG(3,"---->\n"); } } 265 #define DBFENTER { if ( WLAN_DBVAR >= 5 ){ WLAN_LOG_DEBUG(3,"---->\n"); } }
266 #define DBFEXIT { if ( WLAN_DBVAR >= 5 ){ WLAN_LOG_DEBUG(3,"<----\n"); } } 266 #define DBFEXIT { if ( WLAN_DBVAR >= 5 ){ WLAN_LOG_DEBUG(3,"<----\n"); } }
267 267
268 #define WLAN_LOG_DEBUG(l,x,args...) if ( WLAN_DBVAR >= (l)) printk(KERN_DEBUG "%s(%lu): " x , __FUNCTION__, (preempt_count() & PREEMPT_MASK), ##args ); 268 #define WLAN_LOG_DEBUG(l,x,args...) if ( WLAN_DBVAR >= (l)) printk(KERN_DEBUG "%s(%lu): " x , __func__, (preempt_count() & PREEMPT_MASK), ##args );
269#else 269#else
270 #define WLAN_ASSERT(c) 270 #define WLAN_ASSERT(c)
271 #define WLAN_HEX_DUMP( l, s, p, n) 271 #define WLAN_HEX_DUMP( l, s, p, n)
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index bcefbddeba50..289d81adfb9c 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -36,7 +36,8 @@ config USB_ARCH_HAS_OHCI
36 default y if PXA3xx 36 default y if PXA3xx
37 default y if ARCH_EP93XX 37 default y if ARCH_EP93XX
38 default y if ARCH_AT91 38 default y if ARCH_AT91
39 default y if ARCH_PNX4008 39 default y if ARCH_PNX4008 && I2C
40 default y if MFD_TC6393XB
40 # PPC: 41 # PPC:
41 default y if STB03xxx 42 default y if STB03xxx
42 default y if PPC_MPC52xx 43 default y if PPC_MPC52xx
@@ -97,6 +98,8 @@ source "drivers/usb/core/Kconfig"
97 98
98source "drivers/usb/mon/Kconfig" 99source "drivers/usb/mon/Kconfig"
99 100
101source "drivers/usb/wusbcore/Kconfig"
102
100source "drivers/usb/host/Kconfig" 103source "drivers/usb/host/Kconfig"
101 104
102source "drivers/usb/musb/Kconfig" 105source "drivers/usb/musb/Kconfig"
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index a419c42e880e..8b7c419b876e 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -16,9 +16,12 @@ obj-$(CONFIG_USB_UHCI_HCD) += host/
16obj-$(CONFIG_USB_SL811_HCD) += host/ 16obj-$(CONFIG_USB_SL811_HCD) += host/
17obj-$(CONFIG_USB_U132_HCD) += host/ 17obj-$(CONFIG_USB_U132_HCD) += host/
18obj-$(CONFIG_USB_R8A66597_HCD) += host/ 18obj-$(CONFIG_USB_R8A66597_HCD) += host/
19obj-$(CONFIG_USB_HWA_HCD) += host/
19 20
20obj-$(CONFIG_USB_C67X00_HCD) += c67x00/ 21obj-$(CONFIG_USB_C67X00_HCD) += c67x00/
21 22
23obj-$(CONFIG_USB_WUSB) += wusbcore/
24
22obj-$(CONFIG_USB_ACM) += class/ 25obj-$(CONFIG_USB_ACM) += class/
23obj-$(CONFIG_USB_PRINTER) += class/ 26obj-$(CONFIG_USB_PRINTER) += class/
24 27
diff --git a/drivers/usb/atm/speedtch.c b/drivers/usb/atm/speedtch.c
index 76fce44c2f9a..3e862401a638 100644
--- a/drivers/usb/atm/speedtch.c
+++ b/drivers/usb/atm/speedtch.c
@@ -722,6 +722,16 @@ static void speedtch_atm_stop(struct usbatm_data *usbatm, struct atm_dev *atm_de
722 flush_scheduled_work(); 722 flush_scheduled_work();
723} 723}
724 724
725static int speedtch_pre_reset(struct usb_interface *intf)
726{
727 return 0;
728}
729
730static int speedtch_post_reset(struct usb_interface *intf)
731{
732 return 0;
733}
734
725 735
726/********** 736/**********
727** USB ** 737** USB **
@@ -740,6 +750,8 @@ static struct usb_driver speedtch_usb_driver = {
740 .name = speedtch_driver_name, 750 .name = speedtch_driver_name,
741 .probe = speedtch_usb_probe, 751 .probe = speedtch_usb_probe,
742 .disconnect = usbatm_usb_disconnect, 752 .disconnect = usbatm_usb_disconnect,
753 .pre_reset = speedtch_pre_reset,
754 .post_reset = speedtch_post_reset,
743 .id_table = speedtch_usb_ids 755 .id_table = speedtch_usb_ids
744}; 756};
745 757
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index fab23ee8702b..20104443081a 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -849,9 +849,10 @@ static void acm_write_buffers_free(struct acm *acm)
849{ 849{
850 int i; 850 int i;
851 struct acm_wb *wb; 851 struct acm_wb *wb;
852 struct usb_device *usb_dev = interface_to_usbdev(acm->control);
852 853
853 for (wb = &acm->wb[0], i = 0; i < ACM_NW; i++, wb++) { 854 for (wb = &acm->wb[0], i = 0; i < ACM_NW; i++, wb++) {
854 usb_buffer_free(acm->dev, acm->writesize, wb->buf, wb->dmah); 855 usb_buffer_free(usb_dev, acm->writesize, wb->buf, wb->dmah);
855 } 856 }
856} 857}
857 858
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 7429f70b9d06..5a8ecc045e3f 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -42,6 +42,8 @@ static struct usb_device_id wdm_ids[] = {
42 { } 42 { }
43}; 43};
44 44
45MODULE_DEVICE_TABLE (usb, wdm_ids);
46
45#define WDM_MINOR_BASE 176 47#define WDM_MINOR_BASE 176
46 48
47 49
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index e935be7eb468..3d7793d93031 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -1610,7 +1610,8 @@ int usb_external_resume_device(struct usb_device *udev)
1610 status = usb_resume_both(udev); 1610 status = usb_resume_both(udev);
1611 udev->last_busy = jiffies; 1611 udev->last_busy = jiffies;
1612 usb_pm_unlock(udev); 1612 usb_pm_unlock(udev);
1613 do_unbind_rebind(udev, DO_REBIND); 1613 if (status == 0)
1614 do_unbind_rebind(udev, DO_REBIND);
1614 1615
1615 /* Now that the device is awake, we can start trying to autosuspend 1616 /* Now that the device is awake, we can start trying to autosuspend
1616 * it again. */ 1617 * it again. */
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index d73ce262c365..9b3f16bd12cb 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -3504,7 +3504,7 @@ int usb_reset_device(struct usb_device *udev)
3504 USB_INTERFACE_BOUND) 3504 USB_INTERFACE_BOUND)
3505 rebind = 1; 3505 rebind = 1;
3506 } 3506 }
3507 if (rebind) 3507 if (ret == 0 && rebind)
3508 usb_rebind_intf(cintf); 3508 usb_rebind_intf(cintf);
3509 } 3509 }
3510 } 3510 }
diff --git a/drivers/usb/gadget/config.c b/drivers/usb/gadget/config.c
index 1ca1c326392a..e1191b9a316a 100644
--- a/drivers/usb/gadget/config.c
+++ b/drivers/usb/gadget/config.c
@@ -168,7 +168,7 @@ usb_copy_descriptors(struct usb_descriptor_header **src)
168 * usb_find_endpoint - find a copy of an endpoint descriptor 168 * usb_find_endpoint - find a copy of an endpoint descriptor
169 * @src: original vector of descriptors 169 * @src: original vector of descriptors
170 * @copy: copy of @src 170 * @copy: copy of @src
171 * @ep: endpoint descriptor found in @src 171 * @match: endpoint descriptor found in @src
172 * 172 *
173 * This returns the copy of the @match descriptor made for @copy. Its 173 * This returns the copy of the @match descriptor made for @copy. Its
174 * intended use is to help remembering the endpoint descriptor to use 174 * intended use is to help remembering the endpoint descriptor to use
diff --git a/drivers/usb/gadget/pxa27x_udc.c b/drivers/usb/gadget/pxa27x_udc.c
index bcf375ca3d72..caa37c95802c 100644
--- a/drivers/usb/gadget/pxa27x_udc.c
+++ b/drivers/usb/gadget/pxa27x_udc.c
@@ -650,7 +650,7 @@ pxa_ep_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
650 struct pxa27x_request *req; 650 struct pxa27x_request *req;
651 651
652 req = kzalloc(sizeof *req, gfp_flags); 652 req = kzalloc(sizeof *req, gfp_flags);
653 if (!req || !_ep) 653 if (!req)
654 return NULL; 654 return NULL;
655 655
656 INIT_LIST_HEAD(&req->queue); 656 INIT_LIST_HEAD(&req->queue);
diff --git a/drivers/usb/gadget/s3c2410_udc.c b/drivers/usb/gadget/s3c2410_udc.c
index 48f51b12d2e2..00ba06b44752 100644
--- a/drivers/usb/gadget/s3c2410_udc.c
+++ b/drivers/usb/gadget/s3c2410_udc.c
@@ -1894,11 +1894,8 @@ static int s3c2410_udc_probe(struct platform_device *pdev)
1894 udc->regs_info = debugfs_create_file("registers", S_IRUGO, 1894 udc->regs_info = debugfs_create_file("registers", S_IRUGO,
1895 s3c2410_udc_debugfs_root, 1895 s3c2410_udc_debugfs_root,
1896 udc, &s3c2410_udc_debugfs_fops); 1896 udc, &s3c2410_udc_debugfs_fops);
1897 if (IS_ERR(udc->regs_info)) { 1897 if (!udc->regs_info)
1898 dev_warn(dev, "debugfs file creation failed %ld\n", 1898 dev_warn(dev, "debugfs file creation failed\n");
1899 PTR_ERR(udc->regs_info));
1900 udc->regs_info = NULL;
1901 }
1902 } 1899 }
1903 1900
1904 dev_dbg(dev, "probe ok\n"); 1901 dev_dbg(dev, "probe ok\n");
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 228797e54f9c..56f592dc0b36 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -138,7 +138,6 @@ config USB_OHCI_HCD
138 tristate "OHCI HCD support" 138 tristate "OHCI HCD support"
139 depends on USB && USB_ARCH_HAS_OHCI 139 depends on USB && USB_ARCH_HAS_OHCI
140 select ISP1301_OMAP if MACH_OMAP_H2 || MACH_OMAP_H3 140 select ISP1301_OMAP if MACH_OMAP_H2 || MACH_OMAP_H3
141 select I2C if ARCH_PNX4008
142 ---help--- 141 ---help---
143 The Open Host Controller Interface (OHCI) is a standard for accessing 142 The Open Host Controller Interface (OHCI) is a standard for accessing
144 USB 1.1 host controller hardware. It does more in hardware than Intel's 143 USB 1.1 host controller hardware. It does more in hardware than Intel's
@@ -305,3 +304,31 @@ config SUPERH_ON_CHIP_R8A66597
305 help 304 help
306 This driver enables support for the on-chip R8A66597 in the 305 This driver enables support for the on-chip R8A66597 in the
307 SH7366 and SH7723 processors. 306 SH7366 and SH7723 processors.
307
308config USB_WHCI_HCD
309 tristate "Wireless USB Host Controller Interface (WHCI) driver (EXPERIMENTAL)"
310 depends on EXPERIMENTAL
311 depends on PCI && USB
312 select USB_WUSB
313 select UWB_WHCI
314 help
315 A driver for PCI-based Wireless USB Host Controllers that are
316 compliant with the WHCI specification.
317
318 To compile this driver a module, choose M here: the module
319 will be called "whci-hcd".
320
321config USB_HWA_HCD
322 tristate "Host Wire Adapter (HWA) driver (EXPERIMENTAL)"
323 depends on EXPERIMENTAL
324 depends on USB
325 select USB_WUSB
326 select UWB_HWA
327 help
328 This driver enables you to connect Wireless USB devices to
329 your system using a Host Wire Adaptor USB dongle. This is an
330 UWB Radio Controller and WUSB Host Controller connected to
331 your machine via USB (specified in WUSB1.0).
332
333 To compile this driver a module, choose M here: the module
334 will be called "hwa-hc".
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index f1edda2dcfde..23be22224044 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -8,6 +8,8 @@ endif
8 8
9isp1760-objs := isp1760-hcd.o isp1760-if.o 9isp1760-objs := isp1760-hcd.o isp1760-if.o
10 10
11obj-$(CONFIG_USB_WHCI_HCD) += whci/
12
11obj-$(CONFIG_PCI) += pci-quirks.o 13obj-$(CONFIG_PCI) += pci-quirks.o
12 14
13obj-$(CONFIG_USB_EHCI_HCD) += ehci-hcd.o 15obj-$(CONFIG_USB_EHCI_HCD) += ehci-hcd.o
@@ -19,3 +21,4 @@ obj-$(CONFIG_USB_SL811_CS) += sl811_cs.o
19obj-$(CONFIG_USB_U132_HCD) += u132-hcd.o 21obj-$(CONFIG_USB_U132_HCD) += u132-hcd.o
20obj-$(CONFIG_USB_R8A66597_HCD) += r8a66597-hcd.o 22obj-$(CONFIG_USB_R8A66597_HCD) += r8a66597-hcd.o
21obj-$(CONFIG_USB_ISP1760_HCD) += isp1760.o 23obj-$(CONFIG_USB_ISP1760_HCD) += isp1760.o
24obj-$(CONFIG_USB_HWA_HCD) += hwa-hc.o
diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c
new file mode 100644
index 000000000000..64be4d88df11
--- /dev/null
+++ b/drivers/usb/host/hwa-hc.c
@@ -0,0 +1,925 @@
1/*
2 * Host Wire Adapter:
3 * Driver glue, HWA-specific functions, bridges to WAHC and WUSBHC
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * The HWA driver is a simple layer that forwards requests to the WAHC
24 * (Wire Adater Host Controller) or WUSBHC (Wireless USB Host
25 * Controller) layers.
26 *
27 * Host Wire Adapter is the 'WUSB 1.0 standard' name for Wireless-USB
28 * Host Controller that is connected to your system via USB (a USB
29 * dongle that implements a USB host...). There is also a Device Wired
30 * Adaptor, DWA (Wireless USB hub) that uses the same mechanism for
31 * transferring data (it is after all a USB host connected via
32 * Wireless USB), we have a common layer called Wire Adapter Host
33 * Controller that does all the hard work. The WUSBHC (Wireless USB
34 * Host Controller) is the part common to WUSB Host Controllers, the
35 * HWA and the PCI-based one, that is implemented following the WHCI
36 * spec. All these layers are implemented in ../wusbcore.
37 *
38 * The main functions are hwahc_op_urb_{en,de}queue(), that pass the
39 * job of converting a URB to a Wire Adapter
40 *
41 * Entry points:
42 *
43 * hwahc_driver_*() Driver initialization, registration and
44 * teardown.
45 *
46 * hwahc_probe() New device came up, create an instance for
47 * it [from device enumeration].
48 *
49 * hwahc_disconnect() Remove device instance [from device
50 * enumeration].
51 *
52 * [__]hwahc_op_*() Host-Wire-Adaptor specific functions for
53 * starting/stopping/etc (some might be made also
54 * DWA).
55 */
56#include <linux/kernel.h>
57#include <linux/version.h>
58#include <linux/init.h>
59#include <linux/module.h>
60#include <linux/workqueue.h>
61#include <linux/wait.h>
62#include <linux/completion.h>
63#include "../wusbcore/wa-hc.h"
64#include "../wusbcore/wusbhc.h"
65
66#define D_LOCAL 0
67#include <linux/uwb/debug.h>
68
69struct hwahc {
70 struct wusbhc wusbhc; /* has to be 1st */
71 struct wahc wa;
72 u8 buffer[16]; /* for misc usb transactions */
73};
74
75/**
76 * FIXME should be wusbhc
77 *
78 * NOTE: we need to cache the Cluster ID because later...there is no
79 * way to get it :)
80 */
81static int __hwahc_set_cluster_id(struct hwahc *hwahc, u8 cluster_id)
82{
83 int result;
84 struct wusbhc *wusbhc = &hwahc->wusbhc;
85 struct wahc *wa = &hwahc->wa;
86 struct device *dev = &wa->usb_iface->dev;
87
88 result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
89 WUSB_REQ_SET_CLUSTER_ID,
90 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
91 cluster_id,
92 wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
93 NULL, 0, 1000 /* FIXME: arbitrary */);
94 if (result < 0)
95 dev_err(dev, "Cannot set WUSB Cluster ID to 0x%02x: %d\n",
96 cluster_id, result);
97 else
98 wusbhc->cluster_id = cluster_id;
99 dev_info(dev, "Wireless USB Cluster ID set to 0x%02x\n", cluster_id);
100 return result;
101}
102
103static int __hwahc_op_set_num_dnts(struct wusbhc *wusbhc, u8 interval, u8 slots)
104{
105 struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
106 struct wahc *wa = &hwahc->wa;
107
108 return usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
109 WUSB_REQ_SET_NUM_DNTS,
110 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
111 interval << 8 | slots,
112 wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
113 NULL, 0, 1000 /* FIXME: arbitrary */);
114}
115
116/*
117 * Reset a WUSB host controller and wait for it to complete doing it.
118 *
119 * @usb_hcd: Pointer to WUSB Host Controller instance.
120 *
121 */
122static int hwahc_op_reset(struct usb_hcd *usb_hcd)
123{
124 int result;
125 struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
126 struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
127 struct device *dev = &hwahc->wa.usb_iface->dev;
128
129 d_fnstart(4, dev, "(hwahc %p)\n", hwahc);
130 mutex_lock(&wusbhc->mutex);
131 wa_nep_disarm(&hwahc->wa);
132 result = __wa_set_feature(&hwahc->wa, WA_RESET);
133 if (result < 0) {
134 dev_err(dev, "error commanding HC to reset: %d\n", result);
135 goto error_unlock;
136 }
137 d_printf(3, dev, "reset: waiting for device to change state\n");
138 result = __wa_wait_status(&hwahc->wa, WA_STATUS_RESETTING, 0);
139 if (result < 0) {
140 dev_err(dev, "error waiting for HC to reset: %d\n", result);
141 goto error_unlock;
142 }
143error_unlock:
144 mutex_unlock(&wusbhc->mutex);
145 d_fnend(4, dev, "(hwahc %p) = %d\n", hwahc, result);
146 return result;
147}
148
149/*
150 * FIXME: break this function up
151 */
152static int hwahc_op_start(struct usb_hcd *usb_hcd)
153{
154 u8 addr;
155 int result;
156 struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
157 struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
158 struct device *dev = &hwahc->wa.usb_iface->dev;
159
160 /* Set up a Host Info WUSB Information Element */
161 d_fnstart(4, dev, "(hwahc %p)\n", hwahc);
162 result = -ENOSPC;
163 mutex_lock(&wusbhc->mutex);
164 /* Start the numbering from the top so that the bottom
165 * range of the unauth addr space is used for devices,
166 * the top for HCs; use 0xfe - RC# */
167 addr = wusb_cluster_id_get();
168 if (addr == 0)
169 goto error_cluster_id_get;
170 result = __hwahc_set_cluster_id(hwahc, addr);
171 if (result < 0)
172 goto error_set_cluster_id;
173
174 result = wa_nep_arm(&hwahc->wa, GFP_KERNEL);
175 if (result < 0) {
176 dev_err(dev, "cannot listen to notifications: %d\n", result);
177 goto error_stop;
178 }
179 usb_hcd->uses_new_polling = 1;
180 usb_hcd->poll_rh = 1;
181 usb_hcd->state = HC_STATE_RUNNING;
182 result = 0;
183out:
184 mutex_unlock(&wusbhc->mutex);
185 d_fnend(4, dev, "(hwahc %p) = %d\n", hwahc, result);
186 return result;
187
188error_stop:
189 __wa_stop(&hwahc->wa);
190error_set_cluster_id:
191 wusb_cluster_id_put(wusbhc->cluster_id);
192error_cluster_id_get:
193 goto out;
194
195}
196
197/*
198 * FIXME: break this function up
199 */
200static int __hwahc_op_wusbhc_start(struct wusbhc *wusbhc)
201{
202 int result;
203 struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
204 struct device *dev = &hwahc->wa.usb_iface->dev;
205
206 /* Set up a Host Info WUSB Information Element */
207 d_fnstart(4, dev, "(hwahc %p)\n", hwahc);
208 result = -ENOSPC;
209
210 result = __wa_set_feature(&hwahc->wa, WA_ENABLE);
211 if (result < 0) {
212 dev_err(dev, "error commanding HC to start: %d\n", result);
213 goto error_stop;
214 }
215 result = __wa_wait_status(&hwahc->wa, WA_ENABLE, WA_ENABLE);
216 if (result < 0) {
217 dev_err(dev, "error waiting for HC to start: %d\n", result);
218 goto error_stop;
219 }
220 result = 0;
221out:
222 d_fnend(4, dev, "(hwahc %p) = %d\n", hwahc, result);
223 return result;
224
225error_stop:
226 result = __wa_clear_feature(&hwahc->wa, WA_ENABLE);
227 goto out;
228}
229
230static int hwahc_op_suspend(struct usb_hcd *usb_hcd, pm_message_t msg)
231{
232 struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
233 struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
234 dev_err(wusbhc->dev, "%s (%p [%p], 0x%lx) UNIMPLEMENTED\n", __func__,
235 usb_hcd, hwahc, *(unsigned long *) &msg);
236 return -ENOSYS;
237}
238
239static int hwahc_op_resume(struct usb_hcd *usb_hcd)
240{
241 struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
242 struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
243
244 dev_err(wusbhc->dev, "%s (%p [%p]) UNIMPLEMENTED\n", __func__,
245 usb_hcd, hwahc);
246 return -ENOSYS;
247}
248
249static void __hwahc_op_wusbhc_stop(struct wusbhc *wusbhc)
250{
251 int result;
252 struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
253 struct device *dev = &hwahc->wa.usb_iface->dev;
254
255 d_fnstart(4, dev, "(hwahc %p)\n", hwahc);
256 /* Nothing for now */
257 d_fnend(4, dev, "(hwahc %p) = %d\n", hwahc, result);
258 return;
259}
260
261/*
262 * No need to abort pipes, as when this is called, all the children
263 * has been disconnected and that has done it [through
264 * usb_disable_interface() -> usb_disable_endpoint() ->
265 * hwahc_op_ep_disable() - >rpipe_ep_disable()].
266 */
/* HCD stop callback: tears the channel down under the wusbhc mutex in
 * strict order - WUSB layer first, then the notification endpoint, then
 * the Wire Adapter itself - and finally releases the cluster ID. */
267static void hwahc_op_stop(struct usb_hcd *usb_hcd)
268{
269 int result;
270 struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
271 struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
272 struct wahc *wa = &hwahc->wa;
273 struct device *dev = &wa->usb_iface->dev;
274
275 d_fnstart(4, dev, "(hwahc %p)\n", hwahc);
276 mutex_lock(&wusbhc->mutex);
277 wusbhc_stop(wusbhc);
278 wa_nep_disarm(&hwahc->wa);
 /* __wa_stop()'s result only feeds the debug trace below; this
  * callback returns void, so errors cannot be propagated. */
279 result = __wa_stop(&hwahc->wa);
280 wusb_cluster_id_put(wusbhc->cluster_id);
281 mutex_unlock(&wusbhc->mutex);
282 d_fnend(4, dev, "(hwahc %p) = %d\n", hwahc, result);
283 return;
284}
285
286static int hwahc_op_get_frame_number(struct usb_hcd *usb_hcd)
287{
288 struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
289 struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
290
291 dev_err(wusbhc->dev, "%s (%p [%p]) UNIMPLEMENTED\n", __func__,
292 usb_hcd, hwahc);
293 return -ENOSYS;
294}
295
296static int hwahc_op_urb_enqueue(struct usb_hcd *usb_hcd, struct urb *urb,
297 gfp_t gfp)
298{
299 struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
300 struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
301
302 return wa_urb_enqueue(&hwahc->wa, urb->ep, urb, gfp);
303}
304
305static int hwahc_op_urb_dequeue(struct usb_hcd *usb_hcd, struct urb *urb,
306 int status)
307{
308 struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
309 struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
310
311 return wa_urb_dequeue(&hwahc->wa, urb);
312}
313
314/*
315 * Release resources allocated for an endpoint
316 *
317 * If there is an associated rpipe to this endpoint, go ahead and put it.
318 */
319static void hwahc_op_endpoint_disable(struct usb_hcd *usb_hcd,
320 struct usb_host_endpoint *ep)
321{
322 struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
323 struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
324
325 rpipe_ep_disable(&hwahc->wa, ep);
326}
327
328/*
329 * Set the UWB MAS allocation for the WUSB cluster
330 *
331 * @stream_index: stream to use (-1 for cancelling the allocation)
332 * @mas: mas bitmap to use
333 */
334static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
335 const struct uwb_mas_bm *mas)
336{
337 int result;
338 struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
339 struct wahc *wa = &hwahc->wa;
340 struct device *dev = &wa->usb_iface->dev;
341 u8 mas_le[UWB_NUM_MAS/8];
342
343 /* Set the stream index */
344 result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
345 WUSB_REQ_SET_STREAM_IDX,
346 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
347 stream_index,
348 wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
349 NULL, 0, 1000 /* FIXME: arbitrary */);
350 if (result < 0) {
351 dev_err(dev, "Cannot set WUSB stream index: %d\n", result);
352 goto out;
353 }
354 uwb_mas_bm_copy_le(mas_le, mas);
355 /* Set the MAS allocation */
356 result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
357 WUSB_REQ_SET_WUSB_MAS,
358 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
359 0, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
360 mas_le, 32, 1000 /* FIXME: arbitrary */);
361 if (result < 0)
362 dev_err(dev, "Cannot set WUSB MAS allocation: %d\n", result);
363out:
364 return result;
365}
366
367/*
368 * Add an IE to the host's MMC
369 *
370 * @interval: See WUSB1.0[8.5.3.1]
371 * @repeat_cnt: See WUSB1.0[8.5.3.1]
372 * @handle: See WUSB1.0[8.5.3.1]
373 * @wuie: Pointer to the header of the WUSB IE data to add.
374 * MUST BE allocated in a kmalloc buffer (no stack or
375 * vmalloc).
376 *
377 * NOTE: the format of the WUSB IEs for MMCs are different to the
378 * normal MBOA MAC IEs (IE Id + Length in MBOA MAC vs. Length +
379 * Id in WUSB IEs). Standards...you gotta love'em.
380 */
381static int __hwahc_op_mmcie_add(struct wusbhc *wusbhc, u8 interval,
382 u8 repeat_cnt, u8 handle,
383 struct wuie_hdr *wuie)
384{
385 struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
386 struct wahc *wa = &hwahc->wa;
387 u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber;
388
 /* wValue packs interval:repeat_cnt, wIndex packs handle:interface;
  * the whole IE (wuie->bLength bytes) is the data stage. */
389 return usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
390 WUSB_REQ_ADD_MMC_IE,
391 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
392 interval << 8 | repeat_cnt,
393 handle << 8 | iface_no,
394 wuie, wuie->bLength, 1000 /* FIXME: arbitrary */);
395}
396
397/*
398 * Remove an IE to the host's MMC
399 *
400 * @handle: See WUSB1.0[8.5.3.1]
401 */
402static int __hwahc_op_mmcie_rm(struct wusbhc *wusbhc, u8 handle)
403{
404 struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
405 struct wahc *wa = &hwahc->wa;
406 u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber;
407 return usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
408 WUSB_REQ_REMOVE_MMC_IE,
409 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
410 0, handle << 8 | iface_no,
411 NULL, 0, 1000 /* FIXME: arbitrary */);
412}
413
414/*
415 * Update device information for a given fake port
416 *
417 * @port_idx: Fake port to which device is connected (wusbhc index, not
418 * USB port number).
419 */
/* NOTE(review): the kernel-doc above mentions @port_idx but the function
 * takes a wusb_dev; the port index actually used is wusb_dev->port_idx. */
420static int __hwahc_op_dev_info_set(struct wusbhc *wusbhc,
421 struct wusb_dev *wusb_dev)
422{
423 struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
424 struct wahc *wa = &hwahc->wa;
425 u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber;
426 struct hwa_dev_info *dev_info;
427 int ret;
428
429 /* fill out the Device Info buffer and send it */
 /* heap buffer (not stack) - presumably because usb_control_msg()
  * needs DMA-able memory; confirm against USB core rules */
430 dev_info = kzalloc(sizeof(struct hwa_dev_info), GFP_KERNEL);
431 if (!dev_info)
432 return -ENOMEM;
433 uwb_mas_bm_copy_le(dev_info->bmDeviceAvailability,
434 &wusb_dev->availability);
435 dev_info->bDeviceAddress = wusb_dev->addr;
436
437 /*
438 * If the descriptors haven't been read yet, use a default PHY
439 * rate of 53.3 Mbit/s only. The correct value will be used
440 * when this will be called again as part of the
441 * authentication process (which occurs after the descriptors
442 * have been read).
443 */
444 if (wusb_dev->wusb_cap_descr)
445 dev_info->wPHYRates = wusb_dev->wusb_cap_descr->wPHYRates;
446 else
447 dev_info->wPHYRates = cpu_to_le16(USB_WIRELESS_PHY_53);
448
449 ret = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
450 WUSB_REQ_SET_DEV_INFO,
451 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
452 0, wusb_dev->port_idx << 8 | iface_no,
453 dev_info, sizeof(struct hwa_dev_info),
454 1000 /* FIXME: arbitrary */);
455 kfree(dev_info);
456 return ret;
457}
458
459/*
460 * Set host's idea of which encryption (and key) method to use when
461 * talking to ad evice on a given port.
462 *
463 * If key is NULL, it means disable encryption for that "virtual port"
464 * (used when we disconnect).
465 */
466static int __hwahc_dev_set_key(struct wusbhc *wusbhc, u8 port_idx, u32 tkid,
467 const void *key, size_t key_size,
468 u8 key_idx)
469{
470 int result = -ENOMEM;
471 struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
472 struct wahc *wa = &hwahc->wa;
473 u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber;
474 struct usb_key_descriptor *keyd;
475 size_t keyd_len;
476
477 keyd_len = sizeof(*keyd) + key_size;
478 keyd = kzalloc(keyd_len, GFP_KERNEL);
479 if (keyd == NULL)
480 return -ENOMEM;
481
482 keyd->bLength = keyd_len;
483 keyd->bDescriptorType = USB_DT_KEY;
484 keyd->tTKID[0] = (tkid >> 0) & 0xff;
485 keyd->tTKID[1] = (tkid >> 8) & 0xff;
486 keyd->tTKID[2] = (tkid >> 16) & 0xff;
487 memcpy(keyd->bKeyData, key, key_size);
488
489 result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
490 USB_REQ_SET_DESCRIPTOR,
491 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
492 USB_DT_KEY << 8 | key_idx,
493 port_idx << 8 | iface_no,
494 keyd, keyd_len, 1000 /* FIXME: arbitrary */);
495
496 memset(keyd, 0, sizeof(*keyd)); /* clear keys etc. */
497 kfree(keyd);
498 return result;
499}
500
501/*
502 * Set host's idea of which encryption (and key) method to use when
 * talking to a device on a given port.
504 *
505 * If key is NULL, it means disable encryption for that "virtual port"
506 * (used when we disconnect).
507 */
508static int __hwahc_op_set_ptk(struct wusbhc *wusbhc, u8 port_idx, u32 tkid,
509 const void *key, size_t key_size)
510{
511 int result = -ENOMEM;
512 struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
513 struct wahc *wa = &hwahc->wa;
514 u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber;
515 u8 encryption_value;
516
517 /* Tell the host which key to use to talk to the device */
518 if (key) {
519 u8 key_idx = wusb_key_index(0, WUSB_KEY_INDEX_TYPE_PTK,
520 WUSB_KEY_INDEX_ORIGINATOR_HOST);
521
522 result = __hwahc_dev_set_key(wusbhc, port_idx, tkid,
523 key, key_size, key_idx);
524 if (result < 0)
525 goto error_set_key;
526 encryption_value = wusbhc->ccm1_etd->bEncryptionValue;
527 } else {
528 /* FIXME: this should come from wusbhc->etd[UNSECURE].value */
529 encryption_value = 0;
530 }
531
 /* Set the encryption type for communicating with the device */
532 result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
533 USB_REQ_SET_ENCRYPTION,
534 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
535 encryption_value, port_idx << 8 | iface_no,
536 NULL, 0, 1000 /* FIXME: arbitrary */);
537 if (result < 0)
 /* NOTE(review): this message dereferences wusbhc->ccm1_etd even on
  * the key == NULL (disable) path - confirm ccm1_etd is always set
  * by hwahc_security_create() before this can run. */
538 dev_err(wusbhc->dev, "Can't set host's WUSB encryption for "
539 "port index %u to %s (value %d): %d\n", port_idx,
540 wusb_et_name(wusbhc->ccm1_etd->bEncryptionType),
541 wusbhc->ccm1_etd->bEncryptionValue, result);
542error_set_key:
543 return result;
544}
546
547/*
548 * Set host's GTK key
549 */
550static int __hwahc_op_set_gtk(struct wusbhc *wusbhc, u32 tkid,
551 const void *key, size_t key_size)
552{
553 u8 key_idx = wusb_key_index(0, WUSB_KEY_INDEX_TYPE_GTK,
554 WUSB_KEY_INDEX_ORIGINATOR_HOST);
555
556 return __hwahc_dev_set_key(wusbhc, 0, tkid, key, key_size, key_idx);
557}
558
559/*
560 * Get the Wire Adapter class-specific descriptor
561 *
562 * NOTE: this descriptor comes with the big bundled configuration
563 * descriptor that includes the interfaces' and endpoints', so
564 * we just look for it in the cached copy kept by the USB stack.
565 *
566 * NOTE2: We convert LE fields to CPU order.
567 */
568static int wa_fill_descr(struct wahc *wa)
569{
570 int result;
571 struct device *dev = &wa->usb_iface->dev;
572 char *itr;
573 struct usb_device *usb_dev = wa->usb_dev;
574 struct usb_descriptor_header *hdr;
575 struct usb_wa_descriptor *wa_descr;
576 size_t itr_size, actconfig_idx;
577
578 actconfig_idx = (usb_dev->actconfig - usb_dev->config) /
579 sizeof(usb_dev->config[0]);
580 itr = usb_dev->rawdescriptors[actconfig_idx];
581 itr_size = le16_to_cpu(usb_dev->actconfig->desc.wTotalLength);
582 while (itr_size >= sizeof(*hdr)) {
583 hdr = (struct usb_descriptor_header *) itr;
584 d_printf(3, dev, "Extra device descriptor: "
585 "type %02x/%u bytes @ %zu (%zu left)\n",
586 hdr->bDescriptorType, hdr->bLength,
587 (itr - usb_dev->rawdescriptors[actconfig_idx]),
588 itr_size);
589 if (hdr->bDescriptorType == USB_DT_WIRE_ADAPTER)
590 goto found;
591 itr += hdr->bLength;
592 itr_size -= hdr->bLength;
593 }
594 dev_err(dev, "cannot find Wire Adapter Class descriptor\n");
595 return -ENODEV;
596
597found:
598 result = -EINVAL;
599 if (hdr->bLength > itr_size) { /* is it available? */
600 dev_err(dev, "incomplete Wire Adapter Class descriptor "
601 "(%zu bytes left, %u needed)\n",
602 itr_size, hdr->bLength);
603 goto error;
604 }
605 if (hdr->bLength < sizeof(*wa->wa_descr)) {
606 dev_err(dev, "short Wire Adapter Class descriptor\n");
607 goto error;
608 }
609 wa->wa_descr = wa_descr = (struct usb_wa_descriptor *) hdr;
610 /* Make LE fields CPU order */
611 wa_descr->bcdWAVersion = le16_to_cpu(wa_descr->bcdWAVersion);
612 wa_descr->wNumRPipes = le16_to_cpu(wa_descr->wNumRPipes);
613 wa_descr->wRPipeMaxBlock = le16_to_cpu(wa_descr->wRPipeMaxBlock);
614 if (wa_descr->bcdWAVersion > 0x0100)
615 dev_warn(dev, "Wire Adapter v%d.%d newer than groked v1.0\n",
616 wa_descr->bcdWAVersion & 0xff00 >> 8,
617 wa_descr->bcdWAVersion & 0x00ff);
618 result = 0;
619error:
620 return result;
621}
622
/* HCD operations table for the HWA: transfer ops route into the Wire
 * Adapter layer, root-hub ops into the common wusbcore helpers. */
623static struct hc_driver hwahc_hc_driver = {
624 .description = "hwa-hcd",
625 .product_desc = "Wireless USB HWA host controller",
626 .hcd_priv_size = sizeof(struct hwahc) - sizeof(struct usb_hcd),
 /* no interrupt line: events arrive over the USB notification pipe */
627 .irq = NULL, /* FIXME */
628 .flags = HCD_USB2, /* FIXME */
629 .reset = hwahc_op_reset,
630 .start = hwahc_op_start,
 /* NOTE(review): generic suspend/resume handlers are hooked to the
  * .pci_* fields here - confirm the USB core invokes these for
  * non-PCI HCDs on this kernel version. */
631 .pci_suspend = hwahc_op_suspend,
632 .pci_resume = hwahc_op_resume,
633 .stop = hwahc_op_stop,
634 .get_frame_number = hwahc_op_get_frame_number,
635 .urb_enqueue = hwahc_op_urb_enqueue,
636 .urb_dequeue = hwahc_op_urb_dequeue,
637 .endpoint_disable = hwahc_op_endpoint_disable,
638
639 .hub_status_data = wusbhc_rh_status_data,
640 .hub_control = wusbhc_rh_control,
641 .bus_suspend = wusbhc_rh_suspend,
642 .bus_resume = wusbhc_rh_resume,
643 .start_port_reset = wusbhc_rh_start_port_reset,
644};
645
646static int hwahc_security_create(struct hwahc *hwahc)
647{
648 int result;
649 struct wusbhc *wusbhc = &hwahc->wusbhc;
650 struct usb_device *usb_dev = hwahc->wa.usb_dev;
651 struct device *dev = &usb_dev->dev;
652 struct usb_security_descriptor *secd;
653 struct usb_encryption_descriptor *etd;
654 void *itr, *top;
655 size_t itr_size, needed, bytes;
656 u8 index;
657 char buf[64];
658
659 /* Find the host's security descriptors in the config descr bundle */
660 index = (usb_dev->actconfig - usb_dev->config) /
661 sizeof(usb_dev->config[0]);
662 itr = usb_dev->rawdescriptors[index];
663 itr_size = le16_to_cpu(usb_dev->actconfig->desc.wTotalLength);
664 top = itr + itr_size;
665 result = __usb_get_extra_descriptor(usb_dev->rawdescriptors[index],
666 le16_to_cpu(usb_dev->actconfig->desc.wTotalLength),
667 USB_DT_SECURITY, (void **) &secd);
668 if (result == -1) {
669 dev_warn(dev, "BUG? WUSB host has no security descriptors\n");
670 return 0;
671 }
672 needed = sizeof(*secd);
673 if (top - (void *)secd < needed) {
674 dev_err(dev, "BUG? Not enough data to process security "
675 "descriptor header (%zu bytes left vs %zu needed)\n",
676 top - (void *) secd, needed);
677 return 0;
678 }
679 needed = le16_to_cpu(secd->wTotalLength);
680 if (top - (void *)secd < needed) {
681 dev_err(dev, "BUG? Not enough data to process security "
682 "descriptors (%zu bytes left vs %zu needed)\n",
683 top - (void *) secd, needed);
684 return 0;
685 }
686 /* Walk over the sec descriptors and store CCM1's on wusbhc */
687 itr = (void *) secd + sizeof(*secd);
688 top = (void *) secd + le16_to_cpu(secd->wTotalLength);
689 index = 0;
690 bytes = 0;
691 while (itr < top) {
692 etd = itr;
693 if (top - itr < sizeof(*etd)) {
694 dev_err(dev, "BUG: bad host security descriptor; "
695 "not enough data (%zu vs %zu left)\n",
696 top - itr, sizeof(*etd));
697 break;
698 }
699 if (etd->bLength < sizeof(*etd)) {
700 dev_err(dev, "BUG: bad host encryption descriptor; "
701 "descriptor is too short "
702 "(%zu vs %zu needed)\n",
703 (size_t)etd->bLength, sizeof(*etd));
704 break;
705 }
706 itr += etd->bLength;
707 bytes += snprintf(buf + bytes, sizeof(buf) - bytes,
708 "%s (0x%02x) ",
709 wusb_et_name(etd->bEncryptionType),
710 etd->bEncryptionValue);
711 wusbhc->ccm1_etd = etd;
712 }
713 dev_info(dev, "supported encryption types: %s\n", buf);
714 if (wusbhc->ccm1_etd == NULL) {
715 dev_err(dev, "E: host doesn't support CCM-1 crypto\n");
716 return 0;
717 }
718 /* Pretty print what we support */
719 return 0;
720}
721
/* Counterpart to hwahc_security_create(): that function allocates no
 * resources (ccm1_etd points into the USB core's cached descriptors),
 * so there is nothing to free here yet. */
722static void hwahc_security_release(struct hwahc *hwahc)
723{
724 /* nothing to do here so far... */
725}
726
/*
 * Wire up a hwahc to its USB interface and UWB radio controller.
 *
 * Takes references on the USB device/interface, looks up the UWB RC,
 * reads the Wire Adapter descriptor, installs the wusbhc operation
 * callbacks and creates the lower layers (security, wusbhc, wa).
 * Returns 0 or a negative error; on error everything acquired so far
 * is released via the reverse-order goto chain at the bottom.
 */
727static int hwahc_create(struct hwahc *hwahc, struct usb_interface *iface)
728{
729 int result;
730 struct device *dev = &iface->dev;
731 struct wusbhc *wusbhc = &hwahc->wusbhc;
732 struct wahc *wa = &hwahc->wa;
733 struct usb_device *usb_dev = interface_to_usbdev(iface);
734
735 wa->usb_dev = usb_get_dev(usb_dev); /* bind the USB device */
736 wa->usb_iface = usb_get_intf(iface);
737 wusbhc->dev = dev;
738 wusbhc->uwb_rc = uwb_rc_get_by_grandpa(iface->dev.parent);
739 if (wusbhc->uwb_rc == NULL) {
740 result = -ENODEV;
741 dev_err(dev, "Cannot get associated UWB Host Controller\n");
742 goto error_rc_get;
743 }
744 result = wa_fill_descr(wa); /* Get the device descriptor */
745 if (result < 0)
746 goto error_fill_descriptor;
 /* clamp the port count to what the USB core can represent */
747 if (wa->wa_descr->bNumPorts > USB_MAXCHILDREN) {
748 dev_err(dev, "FIXME: USB_MAXCHILDREN too low for WUSB "
749 "adapter (%u ports)\n", wa->wa_descr->bNumPorts);
750 wusbhc->ports_max = USB_MAXCHILDREN;
751 } else {
752 wusbhc->ports_max = wa->wa_descr->bNumPorts;
753 }
754 wusbhc->mmcies_max = wa->wa_descr->bNumMMCIEs;
755 wusbhc->start = __hwahc_op_wusbhc_start;
756 wusbhc->stop = __hwahc_op_wusbhc_stop;
757 wusbhc->mmcie_add = __hwahc_op_mmcie_add;
758 wusbhc->mmcie_rm = __hwahc_op_mmcie_rm;
759 wusbhc->dev_info_set = __hwahc_op_dev_info_set;
760 wusbhc->bwa_set = __hwahc_op_bwa_set;
761 wusbhc->set_num_dnts = __hwahc_op_set_num_dnts;
762 wusbhc->set_ptk = __hwahc_op_set_ptk;
763 wusbhc->set_gtk = __hwahc_op_set_gtk;
764 result = hwahc_security_create(hwahc);
765 if (result < 0) {
766 dev_err(dev, "Can't initialize security: %d\n", result);
767 goto error_security_create;
768 }
769 wa->wusb = wusbhc; /* FIXME: ugly, need to fix */
770 result = wusbhc_create(&hwahc->wusbhc);
771 if (result < 0) {
772 dev_err(dev, "Can't create WUSB HC structures: %d\n", result);
773 goto error_wusbhc_create;
774 }
775 result = wa_create(&hwahc->wa, iface);
776 if (result < 0)
777 goto error_wa_create;
778 return 0;
779
 /* unwind in strict reverse order of acquisition */
780error_wa_create:
781 wusbhc_destroy(&hwahc->wusbhc);
782error_wusbhc_create:
783 /* WA Descr fill allocs no resources */
784error_security_create:
785error_fill_descriptor:
786 uwb_rc_put(wusbhc->uwb_rc);
787error_rc_get:
788 usb_put_intf(iface);
789 usb_put_dev(usb_dev);
790 return result;
791}
792
/* Tear down everything hwahc_create() built, in reverse order, under
 * the wusbhc mutex, and drop the USB interface/device references. */
793static void hwahc_destroy(struct hwahc *hwahc)
794{
795 struct wusbhc *wusbhc = &hwahc->wusbhc;
796
797 d_fnstart(1, NULL, "(hwahc %p)\n", hwahc);
798 mutex_lock(&wusbhc->mutex);
799 __wa_destroy(&hwahc->wa);
800 wusbhc_destroy(&hwahc->wusbhc);
801 hwahc_security_release(hwahc);
802 hwahc->wusbhc.dev = NULL;
803 uwb_rc_put(wusbhc->uwb_rc);
804 usb_put_intf(hwahc->wa.usb_iface);
805 usb_put_dev(hwahc->wa.usb_dev);
806 mutex_unlock(&wusbhc->mutex);
807 d_fnend(1, NULL, "(hwahc %p) = void\n", hwahc);
808}
809
/* One-time structure init; just delegates to the Wire Adapter layer. */
810static void hwahc_init(struct hwahc *hwahc)
811{
812 wa_init(&hwahc->wa);
813}
814
/*
 * USB interface probe: allocate the HCD (with hwahc in hcd_priv),
 * initialize the internals, register the HCD with the USB core and
 * run phase B of the WUSB HC setup.  Errors unwind through the goto
 * chain in reverse order of the setup steps.
 */
815static int hwahc_probe(struct usb_interface *usb_iface,
816 const struct usb_device_id *id)
817{
818 int result;
819 struct usb_hcd *usb_hcd;
820 struct wusbhc *wusbhc;
821 struct hwahc *hwahc;
822 struct device *dev = &usb_iface->dev;
823
824 d_fnstart(4, dev, "(%p, %p)\n", usb_iface, id);
825 result = -ENOMEM;
826 usb_hcd = usb_create_hcd(&hwahc_hc_driver, &usb_iface->dev, "wusb-hwa");
827 if (usb_hcd == NULL) {
828 dev_err(dev, "unable to allocate instance\n");
829 goto error_alloc;
830 }
831 usb_hcd->wireless = 1;
 /* no IRQ line on this HCD; pretend one was seen so the core is happy */
832 usb_hcd->flags |= HCD_FLAG_SAW_IRQ;
833 wusbhc = usb_hcd_to_wusbhc(usb_hcd);
834 hwahc = container_of(wusbhc, struct hwahc, wusbhc);
835 hwahc_init(hwahc);
836 result = hwahc_create(hwahc, usb_iface);
837 if (result < 0) {
838 dev_err(dev, "Cannot initialize internals: %d\n", result);
839 goto error_hwahc_create;
840 }
841 result = usb_add_hcd(usb_hcd, 0, 0);
842 if (result < 0) {
843 dev_err(dev, "Cannot add HCD: %d\n", result);
844 goto error_add_hcd;
845 }
846 result = wusbhc_b_create(&hwahc->wusbhc);
847 if (result < 0) {
848 dev_err(dev, "Cannot setup phase B of WUSBHC: %d\n", result);
849 goto error_wusbhc_b_create;
850 }
851 d_fnend(4, dev, "(%p, %p) = 0\n", usb_iface, id);
852 return 0;
853
854error_wusbhc_b_create:
855 usb_remove_hcd(usb_hcd);
856error_add_hcd:
857 hwahc_destroy(hwahc);
858error_hwahc_create:
859 usb_put_hcd(usb_hcd);
860error_alloc:
861 d_fnend(4, dev, "(%p, %p) = %d\n", usb_iface, id, result);
862 return result;
863}
864
/* USB interface disconnect: exact mirror of hwahc_probe()'s success
 * path, torn down in reverse order. */
865static void hwahc_disconnect(struct usb_interface *usb_iface)
866{
867 struct usb_hcd *usb_hcd;
868 struct wusbhc *wusbhc;
869 struct hwahc *hwahc;
870
871 usb_hcd = usb_get_intfdata(usb_iface);
872 wusbhc = usb_hcd_to_wusbhc(usb_hcd);
873 hwahc = container_of(wusbhc, struct hwahc, wusbhc);
874
875 d_fnstart(1, NULL, "(hwahc %p [usb_iface %p])\n", hwahc, usb_iface);
876 wusbhc_b_destroy(&hwahc->wusbhc);
877 usb_remove_hcd(usb_hcd);
878 hwahc_destroy(hwahc);
879 usb_put_hcd(usb_hcd);
880 d_fnend(1, NULL, "(hwahc %p [usb_iface %p]) = void\n", hwahc,
881 usb_iface);
882}
883
884/** USB device ID's that we handle */
885static struct usb_device_id hwahc_id_table[] = {
886 /* FIXME: use class labels for this */
 /* 0xe0/0x02/0x01 - presumably Wireless Controller class / Wire
  * Adapter subclass / HWA protocol; confirm against the USB-IF
  * class code assignments before relying on it */
887 { USB_INTERFACE_INFO(0xe0, 0x02, 0x01), },
888 {},
889};
890MODULE_DEVICE_TABLE(usb, hwahc_id_table);
891
/* Interface driver glue: probe/disconnect registered with usbcore. */
892static struct usb_driver hwahc_driver = {
893 .name = "hwa-hc",
894 .probe = hwahc_probe,
895 .disconnect = hwahc_disconnect,
896 .id_table = hwahc_id_table,
897};
898
899static int __init hwahc_driver_init(void)
900{
901 int result;
902 result = usb_register(&hwahc_driver);
903 if (result < 0) {
904 printk(KERN_ERR "WA-CDS: Cannot register USB driver: %d\n",
905 result);
906 goto error_usb_register;
907 }
908 return 0;
909
910error_usb_register:
911 return result;
912
913}
914module_init(hwahc_driver_init);
915
/* Module exit: unregister the interface driver (disconnect handles
 * any live instances). */
916static void __exit hwahc_driver_exit(void)
917{
918 usb_deregister(&hwahc_driver);
919}
920module_exit(hwahc_driver_exit);
921
922
923MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
924MODULE_DESCRIPTION("Host Wired Adapter USB Host Control Driver");
925MODULE_LICENSE("GPL");
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 8647dab0d7f9..8aa3f4556a32 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -1075,12 +1075,18 @@ MODULE_LICENSE ("GPL");
1075#define SM501_OHCI_DRIVER ohci_hcd_sm501_driver 1075#define SM501_OHCI_DRIVER ohci_hcd_sm501_driver
1076#endif 1076#endif
1077 1077
1078#ifdef CONFIG_MFD_TC6393XB
1079#include "ohci-tmio.c"
1080#define TMIO_OHCI_DRIVER ohci_hcd_tmio_driver
1081#endif
1082
1078#if !defined(PCI_DRIVER) && \ 1083#if !defined(PCI_DRIVER) && \
1079 !defined(PLATFORM_DRIVER) && \ 1084 !defined(PLATFORM_DRIVER) && \
1080 !defined(OF_PLATFORM_DRIVER) && \ 1085 !defined(OF_PLATFORM_DRIVER) && \
1081 !defined(SA1111_DRIVER) && \ 1086 !defined(SA1111_DRIVER) && \
1082 !defined(PS3_SYSTEM_BUS_DRIVER) && \ 1087 !defined(PS3_SYSTEM_BUS_DRIVER) && \
1083 !defined(SM501_OHCI_DRIVER) && \ 1088 !defined(SM501_OHCI_DRIVER) && \
1089 !defined(TMIO_OHCI_DRIVER) && \
1084 !defined(SSB_OHCI_DRIVER) 1090 !defined(SSB_OHCI_DRIVER)
1085#error "missing bus glue for ohci-hcd" 1091#error "missing bus glue for ohci-hcd"
1086#endif 1092#endif
@@ -1147,13 +1153,25 @@ static int __init ohci_hcd_mod_init(void)
1147 goto error_sm501; 1153 goto error_sm501;
1148#endif 1154#endif
1149 1155
1156#ifdef TMIO_OHCI_DRIVER
1157 retval = platform_driver_register(&TMIO_OHCI_DRIVER);
1158 if (retval < 0)
1159 goto error_tmio;
1160#endif
1161
1150 return retval; 1162 return retval;
1151 1163
1152 /* Error path */ 1164 /* Error path */
1165#ifdef TMIO_OHCI_DRIVER
1166 platform_driver_unregister(&TMIO_OHCI_DRIVER);
1167 error_tmio:
1168#endif
1153#ifdef SM501_OHCI_DRIVER 1169#ifdef SM501_OHCI_DRIVER
1170 platform_driver_unregister(&SM501_OHCI_DRIVER);
1154 error_sm501: 1171 error_sm501:
1155#endif 1172#endif
1156#ifdef SSB_OHCI_DRIVER 1173#ifdef SSB_OHCI_DRIVER
1174 ssb_driver_unregister(&SSB_OHCI_DRIVER);
1157 error_ssb: 1175 error_ssb:
1158#endif 1176#endif
1159#ifdef PCI_DRIVER 1177#ifdef PCI_DRIVER
@@ -1189,6 +1207,9 @@ module_init(ohci_hcd_mod_init);
1189 1207
1190static void __exit ohci_hcd_mod_exit(void) 1208static void __exit ohci_hcd_mod_exit(void)
1191{ 1209{
1210#ifdef TMIO_OHCI_DRIVER
1211 platform_driver_unregister(&TMIO_OHCI_DRIVER);
1212#endif
1192#ifdef SM501_OHCI_DRIVER 1213#ifdef SM501_OHCI_DRIVER
1193 platform_driver_unregister(&SM501_OHCI_DRIVER); 1214 platform_driver_unregister(&SM501_OHCI_DRIVER);
1194#endif 1215#endif
diff --git a/drivers/usb/host/ohci-tmio.c b/drivers/usb/host/ohci-tmio.c
new file mode 100644
index 000000000000..f9f134af0bd1
--- /dev/null
+++ b/drivers/usb/host/ohci-tmio.c
@@ -0,0 +1,376 @@
1/*
2 * OHCI HCD(Host Controller Driver) for USB.
3 *
4 *(C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
5 *(C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
6 *(C) Copyright 2002 Hewlett-Packard Company
7 *
8 * Bus glue for Toshiba Mobile IO(TMIO) Controller's OHCI core
9 * (C) Copyright 2005 Chris Humbert <mahadri-usb@drigon.com>
10 * (C) Copyright 2007, 2008 Dmitry Baryshkov <dbaryshkov@gmail.com>
11 *
12 * This is known to work with the following variants:
13 * TC6393XB revision 3 (32kB SRAM)
14 *
15 * The TMIO's OHCI core DMAs through a small internal buffer that
16 * is directly addressable by the CPU.
17 *
18 * Written from sparse documentation from Toshiba and Sharp's driver
19 * for the 2.4 kernel,
20 * usb-ohci-tc6393.c(C) Copyright 2004 Lineo Solutions, Inc.
21 *
22 * This program is free software; you can redistribute it and/or modify
23 * it under the terms of the GNU General Public License version 2 as
24 * published by the Free Software Foundation.
25 */
26
27/*#include <linux/fs.h>
28#include <linux/mount.h>
29#include <linux/pagemap.h>
30#include <linux/init.h>
31#include <linux/namei.h>
32#include <linux/sched.h>*/
33#include <linux/platform_device.h>
34#include <linux/mfd/core.h>
35#include <linux/mfd/tmio.h>
36#include <linux/dma-mapping.h>
37
38/*-------------------------------------------------------------------------*/
39
40/*
41 * USB Host Controller Configuration Register
42 */
43#define CCR_REVID 0x08 /* b Revision ID */
44#define CCR_BASE 0x10 /* l USB Control Register Base Address Low */
45#define CCR_ILME 0x40 /* b Internal Local Memory Enable */
46#define CCR_PM 0x4c /* w Power Management */
47#define CCR_INTC 0x50 /* b INT Control */
48#define CCR_LMW1L 0x54 /* w Local Memory Window 1 LMADRS Low */
49#define CCR_LMW1H 0x56 /* w Local Memory Window 1 LMADRS High */
50#define CCR_LMW1BL 0x58 /* w Local Memory Window 1 Base Address Low */
51#define CCR_LMW1BH 0x5A /* w Local Memory Window 1 Base Address High */
52#define CCR_LMW2L 0x5C /* w Local Memory Window 2 LMADRS Low */
53#define CCR_LMW2H 0x5E /* w Local Memory Window 2 LMADRS High */
54#define CCR_LMW2BL 0x60 /* w Local Memory Window 2 Base Address Low */
55#define CCR_LMW2BH 0x62 /* w Local Memory Window 2 Base Address High */
56#define CCR_MISC 0xFC /* b MISC */
57
58#define CCR_PM_GKEN 0x0001
59#define CCR_PM_CKRNEN 0x0002
60#define CCR_PM_USBPW1 0x0004
61#define CCR_PM_USBPW2 0x0008
62#define CCR_PM_USBPW3 0x0008
63#define CCR_PM_PMEE 0x0100
64#define CCR_PM_PMES 0x8000
65
66/*-------------------------------------------------------------------------*/
67
/* Per-controller TMIO glue state; lives in hcd_priv right after the
 * struct ohci_hcd (see .hcd_priv_size in ohci_tmio_hc_driver). */
68struct tmio_hcd {
69 void __iomem *ccr;
70 spinlock_t lock; /* protects RMW cycles */
71};
72
/* Relies on the hcd_priv layout above: tmio state follows the ohci one. */
73#define hcd_to_tmio(hcd) ((struct tmio_hcd *)(hcd_to_ohci(hcd) + 1))
74
75/*-------------------------------------------------------------------------*/
76
/* Program the TMIO power-management register with the "clocks + PME
 * enabled" configuration.  The value written is a constant mask; the
 * spinlock only serializes this write against other CCR accesses. */
77static void tmio_write_pm(struct platform_device *dev)
78{
79 struct usb_hcd *hcd = platform_get_drvdata(dev);
80 struct tmio_hcd *tmio = hcd_to_tmio(hcd);
81 u16 pm;
82 unsigned long flags;
83
84 spin_lock_irqsave(&tmio->lock, flags);
85
86 pm = CCR_PM_GKEN | CCR_PM_CKRNEN |
87 CCR_PM_PMEE | CCR_PM_PMES;
88
89 tmio_iowrite16(pm, tmio->ccr + CCR_PM);
90 spin_unlock_irqrestore(&tmio->lock, flags);
91}
92
/* Power the controller down: accumulate a power-off bit for each port
 * (the switch deliberately falls through so port N powers off ports
 * 1..N), then disable interrupts, local memory and the base address. */
93static void tmio_stop_hc(struct platform_device *dev)
94{
95 struct usb_hcd *hcd = platform_get_drvdata(dev);
96 struct ohci_hcd *ohci = hcd_to_ohci(hcd);
97 struct tmio_hcd *tmio = hcd_to_tmio(hcd);
98 u16 pm;
99
100 pm = CCR_PM_GKEN | CCR_PM_CKRNEN;
101 switch (ohci->num_ports) {
102 default:
103 dev_err(&dev->dev, "Unsupported amount of ports: %d\n", ohci->num_ports);
 /* fall through - treat unknown counts as 3 ports */
104 case 3:
105 pm |= CCR_PM_USBPW3;
 /* fall through */
106 case 2:
107 pm |= CCR_PM_USBPW2;
 /* fall through */
108 case 1:
109 pm |= CCR_PM_USBPW1;
110 }
111 tmio_iowrite8(0, tmio->ccr + CCR_INTC);
112 tmio_iowrite8(0, tmio->ccr + CCR_ILME);
113 tmio_iowrite16(0, tmio->ccr + CCR_BASE);
114 tmio_iowrite16(0, tmio->ccr + CCR_BASE + 2);
115 tmio_iowrite16(pm, tmio->ccr + CCR_PM);
116}
117
/* Power the cell up and point it at the OHCI register window: the
 * 32-bit base address is written as two 16-bit halves, then local
 * memory and the interrupt line are enabled. */
118static void tmio_start_hc(struct platform_device *dev)
119{
120 struct usb_hcd *hcd = platform_get_drvdata(dev);
121 struct tmio_hcd *tmio = hcd_to_tmio(hcd);
122 unsigned long base = hcd->rsrc_start;
123
124 tmio_write_pm(dev);
125 tmio_iowrite16(base, tmio->ccr + CCR_BASE);
126 tmio_iowrite16(base >> 16, tmio->ccr + CCR_BASE + 2);
127 tmio_iowrite8(1, tmio->ccr + CCR_ILME);
128 tmio_iowrite8(2, tmio->ccr + CCR_INTC);
129
130 dev_info(&dev->dev, "revision %d @ 0x%08llx, irq %d\n",
131 tmio_ioread8(tmio->ccr + CCR_REVID), hcd->rsrc_start, hcd->irq);
132}
133
134static int ohci_tmio_start(struct usb_hcd *hcd)
135{
136 struct ohci_hcd *ohci = hcd_to_ohci(hcd);
137 int ret;
138
139 if ((ret = ohci_init(ohci)) < 0)
140 return ret;
141
142 if ((ret = ohci_run(ohci)) < 0) {
143 err("can't start %s", hcd->self.bus_name);
144 ohci_stop(hcd);
145 return ret;
146 }
147
148 return 0;
149}
150
/* HCD operations for the TMIO OHCI cell: standard ohci-hcd callbacks
 * everywhere except .start, which wraps ohci_run with cell startup. */
151static const struct hc_driver ohci_tmio_hc_driver = {
152 .description = hcd_name,
153 .product_desc = "TMIO OHCI USB Host Controller",
 /* hcd_priv = struct ohci_hcd followed by struct tmio_hcd; the
  * hcd_to_tmio() macro depends on exactly this layout */
154 .hcd_priv_size = sizeof(struct ohci_hcd) + sizeof (struct tmio_hcd),
155
156 /* generic hardware linkage */
157 .irq = ohci_irq,
158 .flags = HCD_USB11 | HCD_MEMORY | HCD_LOCAL_MEM,
159
160 /* basic lifecycle operations */
161 .start = ohci_tmio_start,
162 .stop = ohci_stop,
163 .shutdown = ohci_shutdown,
164
165 /* managing i/o requests and associated device resources */
166 .urb_enqueue = ohci_urb_enqueue,
167 .urb_dequeue = ohci_urb_dequeue,
168 .endpoint_disable = ohci_endpoint_disable,
169
170 /* scheduling support */
171 .get_frame_number = ohci_get_frame,
172
173 /* root hub support */
174 .hub_status_data = ohci_hub_status_data,
175 .hub_control = ohci_hub_control,
176#ifdef CONFIG_PM
177 .bus_suspend = ohci_bus_suspend,
178 .bus_resume = ohci_bus_resume,
179#endif
180 .start_port_reset = ohci_start_port_reset,
181};
182
183/*-------------------------------------------------------------------------*/
184static struct platform_driver ohci_hcd_tmio_driver;
185
186static int __devinit ohci_hcd_tmio_drv_probe(struct platform_device *dev)
187{
188 struct mfd_cell *cell = dev->dev.platform_data;
189 struct resource *regs = platform_get_resource(dev, IORESOURCE_MEM, 0);
190 struct resource *config = platform_get_resource(dev, IORESOURCE_MEM, 1);
191 struct resource *sram = platform_get_resource(dev, IORESOURCE_MEM, 2);
192 int irq = platform_get_irq(dev, 0);
193 struct tmio_hcd *tmio;
194 struct ohci_hcd *ohci;
195 struct usb_hcd *hcd;
196 int ret;
197
198 if (usb_disabled())
199 return -ENODEV;
200
201 if (!cell)
202 return -EINVAL;
203
204 hcd = usb_create_hcd(&ohci_tmio_hc_driver, &dev->dev, dev->dev.bus_id);
205 if (!hcd) {
206 ret = -ENOMEM;
207 goto err_usb_create_hcd;
208 }
209
210 hcd->rsrc_start = regs->start;
211 hcd->rsrc_len = regs->end - regs->start + 1;
212
213 tmio = hcd_to_tmio(hcd);
214
215 spin_lock_init(&tmio->lock);
216
217 tmio->ccr = ioremap(config->start, config->end - config->start + 1);
218 if (!tmio->ccr) {
219 ret = -ENOMEM;
220 goto err_ioremap_ccr;
221 }
222
223 hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
224 if (!hcd->regs) {
225 ret = -ENOMEM;
226 goto err_ioremap_regs;
227 }
228
229 if (!dma_declare_coherent_memory(&dev->dev, sram->start,
230 sram->start,
231 sram->end - sram->start + 1,
232 DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE)) {
233 ret = -EBUSY;
234 goto err_dma_declare;
235 }
236
237 if (cell->enable) {
238 ret = cell->enable(dev);
239 if (ret)
240 goto err_enable;
241 }
242
243 tmio_start_hc(dev);
244 ohci = hcd_to_ohci(hcd);
245 ohci_hcd_init(ohci);
246
247 ret = usb_add_hcd(hcd, irq, IRQF_DISABLED);
248 if (ret)
249 goto err_add_hcd;
250
251 if (ret == 0)
252 return ret;
253
254 usb_remove_hcd(hcd);
255
256err_add_hcd:
257 tmio_stop_hc(dev);
258 if (cell->disable)
259 cell->disable(dev);
260err_enable:
261 dma_release_declared_memory(&dev->dev);
262err_dma_declare:
263 iounmap(hcd->regs);
264err_ioremap_regs:
265 iounmap(tmio->ccr);
266err_ioremap_ccr:
267 usb_put_hcd(hcd);
268err_usb_create_hcd:
269
270 return ret;
271}
272
/* Remove: undo the probe steps in reverse order - unregister the HCD,
 * power the cell down, release the DMA memory and the two mappings. */
273static int __devexit ohci_hcd_tmio_drv_remove(struct platform_device *dev)
274{
275 struct usb_hcd *hcd = platform_get_drvdata(dev);
276 struct tmio_hcd *tmio = hcd_to_tmio(hcd);
277 struct mfd_cell *cell = dev->dev.platform_data;
278
279 usb_remove_hcd(hcd);
280 tmio_stop_hc(dev);
281 if (cell->disable)
282 cell->disable(dev);
283 dma_release_declared_memory(&dev->dev);
284 iounmap(hcd->regs);
285 iounmap(tmio->ccr);
286 usb_put_hcd(hcd);
287
288 platform_set_drvdata(dev, NULL);
289
290 return 0;
291}
292
293#ifdef CONFIG_PM
/* Platform suspend: set the USSUSP bit in CCR_MISC under the lock,
 * let the MFD cell do its own suspend, then mark the HCD suspended. */
294static int ohci_hcd_tmio_drv_suspend(struct platform_device *dev, pm_message_t state)
295{
296 struct mfd_cell *cell = dev->dev.platform_data;
297 struct usb_hcd *hcd = platform_get_drvdata(dev);
298 struct ohci_hcd *ohci = hcd_to_ohci(hcd);
299 struct tmio_hcd *tmio = hcd_to_tmio(hcd);
300 unsigned long flags;
301 u8 misc;
302 int ret;
303
 /* honor the OHCI core's minimum delay between state changes */
304 if (time_before(jiffies, ohci->next_statechange))
305 msleep(5);
306 ohci->next_statechange = jiffies;
307
308 spin_lock_irqsave(&tmio->lock, flags);
309
310 misc = tmio_ioread8(tmio->ccr + CCR_MISC);
311 misc |= 1 << 3; /* USSUSP */
312 tmio_iowrite8(misc, tmio->ccr + CCR_MISC);
313
314 spin_unlock_irqrestore(&tmio->lock, flags);
315
316 if (cell->suspend) {
317 ret = cell->suspend(dev);
318 if (ret)
319 return ret;
320 }
321
322 hcd->state = HC_STATE_SUSPENDED;
323
324 return 0;
325}
326
/* Platform resume: mirror of suspend - resume the MFD cell, restart
 * the TMIO cell, clear USSUSP, then let the OHCI core finish. */
327static int ohci_hcd_tmio_drv_resume(struct platform_device *dev)
328{
329 struct mfd_cell *cell = dev->dev.platform_data;
330 struct usb_hcd *hcd = platform_get_drvdata(dev);
331 struct ohci_hcd *ohci = hcd_to_ohci(hcd);
332 struct tmio_hcd *tmio = hcd_to_tmio(hcd);
333 unsigned long flags;
334 u8 misc;
335 int ret;
336
 /* honor the OHCI core's minimum delay between state changes */
337 if (time_before(jiffies, ohci->next_statechange))
338 msleep(5);
339 ohci->next_statechange = jiffies;
340
341 if (cell->resume) {
342 ret = cell->resume(dev);
343 if (ret)
344 return ret;
345 }
346
347 tmio_start_hc(dev);
348
349 spin_lock_irqsave(&tmio->lock, flags);
350
351 misc = tmio_ioread8(tmio->ccr + CCR_MISC);
352 misc &= ~(1 << 3); /* USSUSP */
353 tmio_iowrite8(misc, tmio->ccr + CCR_MISC);
354
355 spin_unlock_irqrestore(&tmio->lock, flags);
356
357 ohci_finish_controller_resume(hcd);
358
359 return 0;
360}
361#else
362#define ohci_hcd_tmio_drv_suspend NULL
363#define ohci_hcd_tmio_drv_resume NULL
364#endif
365
/* Platform driver glue; suspend/resume compile to NULL without
 * CONFIG_PM (see the #else block above). */
366static struct platform_driver ohci_hcd_tmio_driver = {
367 .probe = ohci_hcd_tmio_drv_probe,
368 .remove = __devexit_p(ohci_hcd_tmio_drv_remove),
369 .shutdown = usb_hcd_platform_shutdown,
370 .suspend = ohci_hcd_tmio_drv_suspend,
371 .resume = ohci_hcd_tmio_drv_resume,
372 .driver = {
373 .name = "tmio-ohci",
374 .owner = THIS_MODULE,
375 },
376};
diff --git a/drivers/usb/host/whci/Kbuild b/drivers/usb/host/whci/Kbuild
new file mode 100644
index 000000000000..26a3871ea0f9
--- /dev/null
+++ b/drivers/usb/host/whci/Kbuild
@@ -0,0 +1,11 @@
1obj-$(CONFIG_USB_WHCI_HCD) += whci-hcd.o
2
3whci-hcd-y := \
4 asl.o \
5 hcd.o \
6 hw.o \
7 init.o \
8 int.o \
9 pzl.o \
10 qset.o \
11 wusb.o
diff --git a/drivers/usb/host/whci/asl.c b/drivers/usb/host/whci/asl.c
new file mode 100644
index 000000000000..4d7078e50572
--- /dev/null
+++ b/drivers/usb/host/whci/asl.c
@@ -0,0 +1,367 @@
1/*
2 * Wireless Host Controller (WHC) asynchronous schedule management.
3 *
4 * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License version
8 * 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18#include <linux/kernel.h>
19#include <linux/dma-mapping.h>
20#include <linux/uwb/umc.h>
21#include <linux/usb.h>
22#define D_LOCAL 0
23#include <linux/uwb/debug.h>
24
25#include "../../wusbcore/wusbhc.h"
26
27#include "whcd.h"
28
#if D_LOCAL >= 4
/* Dump every qset on the ASL (debug builds with D_LOCAL >= 4 only). */
static void dump_asl(struct whc *whc, const char *tag)
{
	struct device *dev = &whc->umc->dev;
	struct whc_qset *qset;

	d_printf(4, dev, "ASL %s\n", tag);

	list_for_each_entry(qset, &whc->async_list, list_node) {
		dump_qset(qset, dev);
	}
}
#else
/* No-op stub when debug output is compiled out. */
static inline void dump_asl(struct whc *whc, const char *tag)
{
}
#endif
46
47
48static void qset_get_next_prev(struct whc *whc, struct whc_qset *qset,
49 struct whc_qset **next, struct whc_qset **prev)
50{
51 struct list_head *n, *p;
52
53 BUG_ON(list_empty(&whc->async_list));
54
55 n = qset->list_node.next;
56 if (n == &whc->async_list)
57 n = n->next;
58 p = qset->list_node.prev;
59 if (p == &whc->async_list)
60 p = p->prev;
61
62 *next = container_of(n, struct whc_qset, list_node);
63 *prev = container_of(p, struct whc_qset, list_node);
64
65}
66
67static void asl_qset_insert_begin(struct whc *whc, struct whc_qset *qset)
68{
69 list_move(&qset->list_node, &whc->async_list);
70 qset->in_sw_list = true;
71}
72
73static void asl_qset_insert(struct whc *whc, struct whc_qset *qset)
74{
75 struct whc_qset *next, *prev;
76
77 qset_clear(whc, qset);
78
79 /* Link into ASL. */
80 qset_get_next_prev(whc, qset, &next, &prev);
81 whc_qset_set_link_ptr(&qset->qh.link, next->qset_dma);
82 whc_qset_set_link_ptr(&prev->qh.link, qset->qset_dma);
83 qset->in_hw_list = true;
84}
85
/*
 * Unlink @qset from the software list and, if other qsets remain,
 * bridge its hardware-list neighbours around it.  Neighbours must be
 * looked up before the list_move() or they would be computed against
 * the removed-list instead of the ASL.
 */
static void asl_qset_remove(struct whc *whc, struct whc_qset *qset)
{
	struct whc_qset *prev, *next;

	qset_get_next_prev(whc, qset, &next, &prev);

	/* Park it on the removed list until the HC has synced. */
	list_move(&qset->list_node, &whc->async_removed_list);
	qset->in_sw_list = false;

	/*
	 * No more qsets in the ASL? The caller must stop the ASL as
	 * it's no longer valid.
	 */
	if (list_empty(&whc->async_list))
		return;

	/* Remove from ASL. */
	whc_qset_set_link_ptr(&prev->qh.link, next->qset_dma);
	qset->in_hw_list = false;
}
106
107/**
108 * process_qset - process any recently inactivated or halted qTDs in a
109 * qset.
110 *
111 * After inactive qTDs are removed, new qTDs can be added if the
112 * urb queue still contains URBs.
113 *
114 * Returns any additional WUSBCMD bits for the ASL sync command (i.e.,
115 * WUSBCMD_ASYNC_QSET_RM if a halted qset was removed).
116 */
117static uint32_t process_qset(struct whc *whc, struct whc_qset *qset)
118{
119 enum whc_update update = 0;
120 uint32_t status = 0;
121
122 while (qset->ntds) {
123 struct whc_qtd *td;
124 int t;
125
126 t = qset->td_start;
127 td = &qset->qtd[qset->td_start];
128 status = le32_to_cpu(td->status);
129
130 /*
131 * Nothing to do with a still active qTD.
132 */
133 if (status & QTD_STS_ACTIVE)
134 break;
135
136 if (status & QTD_STS_HALTED) {
137 /* Ug, an error. */
138 process_halted_qtd(whc, qset, td);
139 goto done;
140 }
141
142 /* Mmm, a completed qTD. */
143 process_inactive_qtd(whc, qset, td);
144 }
145
146 update |= qset_add_qtds(whc, qset);
147
148done:
149 /*
150 * Remove this qset from the ASL if requested, but only if has
151 * no qTDs.
152 */
153 if (qset->remove && qset->ntds == 0) {
154 asl_qset_remove(whc, qset);
155 update |= WHC_UPDATE_REMOVED;
156 }
157 return update;
158}
159
160void asl_start(struct whc *whc)
161{
162 struct whc_qset *qset;
163
164 qset = list_first_entry(&whc->async_list, struct whc_qset, list_node);
165
166 le_writeq(qset->qset_dma | QH_LINK_NTDS(8), whc->base + WUSBASYNCLISTADDR);
167
168 whc_write_wusbcmd(whc, WUSBCMD_ASYNC_EN, WUSBCMD_ASYNC_EN);
169 whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS,
170 WUSBSTS_ASYNC_SCHED, WUSBSTS_ASYNC_SCHED,
171 1000, "start ASL");
172}
173
174void asl_stop(struct whc *whc)
175{
176 whc_write_wusbcmd(whc, WUSBCMD_ASYNC_EN, 0);
177 whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS,
178 WUSBSTS_ASYNC_SCHED, 0,
179 1000, "stop ASL");
180}
181
182void asl_update(struct whc *whc, uint32_t wusbcmd)
183{
184 whc_write_wusbcmd(whc, wusbcmd, wusbcmd);
185 wait_event(whc->async_list_wq,
186 (le_readl(whc->base + WUSBCMD) & WUSBCMD_ASYNC_UPDATED) == 0);
187}
188
/**
 * scan_async_work - scan the ASL for qsets to process.
 *
 * Process each qset in the ASL in turn and then signal the WHC that
 * the ASL has been updated.
 *
 * Then start, stop or update the asynchronous schedule as required.
 */
void scan_async_work(struct work_struct *work)
{
	struct whc *whc = container_of(work, struct whc, async_work);
	struct whc_qset *qset, *t;
	enum whc_update update = 0;

	spin_lock_irq(&whc->lock);

	dump_asl(whc, "before processing");

	/*
	 * Transerve the software list backwards so new qsets can be
	 * safely inserted into the ASL without making it non-circular.
	 */
	list_for_each_entry_safe_reverse(qset, t, &whc->async_list, list_node) {
		/* Newly enqueued qsets are on the software list only;
		 * link them into the hardware list now. */
		if (!qset->in_hw_list) {
			asl_qset_insert(whc, qset);
			update |= WHC_UPDATE_ADDED;
		}

		update |= process_qset(whc, qset);
	}

	dump_asl(whc, "after processing");

	spin_unlock_irq(&whc->lock);

	/* Tell the HC to re-read the schedule (sleeps, so lock dropped). */
	if (update) {
		uint32_t wusbcmd = WUSBCMD_ASYNC_UPDATED | WUSBCMD_ASYNC_SYNCED_DB;
		if (update & WHC_UPDATE_REMOVED)
			wusbcmd |= WUSBCMD_ASYNC_QSET_RM;
		asl_update(whc, wusbcmd);
	}

	/*
	 * Now that the ASL is updated, complete the removal of any
	 * removed qsets.
	 *
	 * NOTE(review): this section takes spin_lock() while the one
	 * above uses spin_lock_irq(); confirm whc->lock is never taken
	 * from hard-irq context, otherwise interrupts should be
	 * disabled here too.
	 */
	spin_lock(&whc->lock);

	list_for_each_entry_safe(qset, t, &whc->async_removed_list, list_node) {
		qset_remove_complete(whc, qset);
	}

	spin_unlock(&whc->lock);
}
243
244/**
245 * asl_urb_enqueue - queue an URB onto the asynchronous list (ASL).
246 * @whc: the WHCI host controller
247 * @urb: the URB to enqueue
248 * @mem_flags: flags for any memory allocations
249 *
250 * The qset for the endpoint is obtained and the urb queued on to it.
251 *
252 * Work is scheduled to update the hardware's view of the ASL.
253 */
254int asl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags)
255{
256 struct whc_qset *qset;
257 int err;
258 unsigned long flags;
259
260 spin_lock_irqsave(&whc->lock, flags);
261
262 qset = get_qset(whc, urb, GFP_ATOMIC);
263 if (qset == NULL)
264 err = -ENOMEM;
265 else
266 err = qset_add_urb(whc, qset, urb, GFP_ATOMIC);
267 if (!err) {
268 usb_hcd_link_urb_to_ep(&whc->wusbhc.usb_hcd, urb);
269 if (!qset->in_sw_list)
270 asl_qset_insert_begin(whc, qset);
271 }
272
273 spin_unlock_irqrestore(&whc->lock, flags);
274
275 if (!err)
276 queue_work(whc->workqueue, &whc->async_work);
277
278 return 0;
279}
280
/**
 * asl_urb_dequeue - remove an URB (qset) from the async list.
 * @whc: the WHCI host controller
 * @urb: the URB to dequeue
 * @status: the current status of the URB
 *
 * URBs that do yet have qTDs can simply be removed from the software
 * queue, otherwise the qset must be removed from the ASL so the qTDs
 * can be removed.
 */
int asl_urb_dequeue(struct whc *whc, struct urb *urb, int status)
{
	struct whc_urb *wurb = urb->hcpriv;
	struct whc_qset *qset = wurb->qset;
	struct whc_std *std, *t;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&whc->lock, flags);

	/* Let the USB core veto the unlink (e.g. already dequeued). */
	ret = usb_hcd_check_unlink_urb(&whc->wusbhc.usb_hcd, urb, status);
	if (ret < 0)
		goto out;

	/* Free this URB's transfer descriptors; detach the others'
	 * qTDs so they are re-built when the qset is re-inserted. */
	list_for_each_entry_safe(std, t, &qset->stds, list_node) {
		if (std->urb == urb)
			qset_free_std(whc, std);
		else
			std->qtd = NULL; /* so this std is re-added when the qset is */
	}

	/*
	 * NOTE(review): the whole qset is pulled off the ASL even when
	 * other URBs are still queued on it; they are re-added via the
	 * dequeue work.  Confirm this is intended rather than removing
	 * only when the URB actually had qTDs on the hardware.
	 */
	asl_qset_remove(whc, qset);
	wurb->status = status;
	wurb->is_async = true;
	queue_work(whc->workqueue, &wurb->dequeue_work);

out:
	spin_unlock_irqrestore(&whc->lock, flags);

	return ret;
}
322
/**
 * asl_qset_delete - delete a qset from the ASL
 *
 * The remove flag is set (and the scan work queued) before
 * qset_delete() is called, so the qset is unlinked from the hardware
 * schedule as part of its deletion.
 */
void asl_qset_delete(struct whc *whc, struct whc_qset *qset)
{
	qset->remove = 1;
	queue_work(whc->workqueue, &whc->async_work);
	qset_delete(whc, qset);
}
332
333/**
334 * asl_init - initialize the asynchronous schedule list
335 *
336 * A dummy qset with no qTDs is added to the ASL to simplify removing
337 * qsets (no need to stop the ASL when the last qset is removed).
338 */
339int asl_init(struct whc *whc)
340{
341 struct whc_qset *qset;
342
343 qset = qset_alloc(whc, GFP_KERNEL);
344 if (qset == NULL)
345 return -ENOMEM;
346
347 asl_qset_insert_begin(whc, qset);
348 asl_qset_insert(whc, qset);
349
350 return 0;
351}
352
353/**
354 * asl_clean_up - free ASL resources
355 *
356 * The ASL is stopped and empty except for the dummy qset.
357 */
358void asl_clean_up(struct whc *whc)
359{
360 struct whc_qset *qset;
361
362 if (!list_empty(&whc->async_list)) {
363 qset = list_first_entry(&whc->async_list, struct whc_qset, list_node);
364 list_del(&qset->list_node);
365 qset_free(whc, qset);
366 }
367}
diff --git a/drivers/usb/host/whci/hcd.c b/drivers/usb/host/whci/hcd.c
new file mode 100644
index 000000000000..ef3ad4dca945
--- /dev/null
+++ b/drivers/usb/host/whci/hcd.c
@@ -0,0 +1,339 @@
1/*
2 * Wireless Host Controller (WHC) driver.
3 *
4 * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License version
8 * 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18#include <linux/version.h>
19#include <linux/kernel.h>
20#include <linux/init.h>
21#include <linux/uwb/umc.h>
22
23#include "../../wusbcore/wusbhc.h"
24
25#include "whcd.h"
26
/*
 * One time initialization.
 *
 * Nothing to do here: the hardware reset is done in whc_init() at
 * probe time; this hook exists only to satisfy the hc_driver API.
 */
static int whc_reset(struct usb_hcd *usb_hcd)
{
	return 0;
}
36
/*
 * Start the wireless host controller.
 *
 * Start device notification.
 *
 * Put hc into run state, set DNTS parameters.
 */
static int whc_start(struct usb_hcd *usb_hcd)
{
	struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
	struct whc *whc = wusbhc_to_whc(wusbhc);
	u8 bcid;
	int ret;

	mutex_lock(&wusbhc->mutex);

	/* Enable every interrupt source handled in whc_int_handler(). */
	le_writel(WUSBINTR_GEN_CMD_DONE
		  | WUSBINTR_HOST_ERR
		  | WUSBINTR_ASYNC_SCHED_SYNCED
		  | WUSBINTR_DNTS_INT
		  | WUSBINTR_ERR_INT
		  | WUSBINTR_INT,
		  whc->base + WUSBINTR);

	/* set cluster ID */
	bcid = wusb_cluster_id_get();
	ret = whc_set_cluster_id(whc, bcid);
	if (ret < 0)
		goto out;
	wusbhc->cluster_id = bcid;

	/* start HC */
	whc_write_wusbcmd(whc, WUSBCMD_RUN, WUSBCMD_RUN);

	usb_hcd->uses_new_polling = 1;
	usb_hcd->poll_rh = 1;
	usb_hcd->state = HC_STATE_RUNNING;

out:
	mutex_unlock(&wusbhc->mutex);
	/* NOTE(review): on success this returns whc_set_cluster_id()'s
	 * value; assumed to be 0 on success — confirm. */
	return ret;
}
79
80
/*
 * Stop the wireless host controller.
 *
 * Stop device notification.
 *
 * Wait for pending transfer to stop? Put hc into stop state?
 */
static void whc_stop(struct usb_hcd *usb_hcd)
{
	struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
	struct whc *whc = wusbhc_to_whc(wusbhc);

	mutex_lock(&wusbhc->mutex);

	wusbhc_stop(wusbhc);

	/* stop HC: mask all interrupts, clear RUN, wait for halt. */
	le_writel(0, whc->base + WUSBINTR);
	whc_write_wusbcmd(whc, WUSBCMD_RUN, 0);
	whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS,
		      WUSBSTS_HCHALTED, WUSBSTS_HCHALTED,
		      100, "HC to halt");

	/* Release the cluster ID taken in whc_start(). */
	wusb_cluster_id_put(wusbhc->cluster_id);

	mutex_unlock(&wusbhc->mutex);
}
108
/* Frame numbers are not applicable to Wireless USB, so this hc_driver
 * hook always fails. */
static int whc_get_frame_number(struct usb_hcd *usb_hcd)
{
	/* Frame numbers are not applicable to WUSB. */
	return -ENOSYS;
}
114
115
116/*
117 * Queue an URB to the ASL or PZL
118 */
119static int whc_urb_enqueue(struct usb_hcd *usb_hcd, struct urb *urb,
120 gfp_t mem_flags)
121{
122 struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
123 struct whc *whc = wusbhc_to_whc(wusbhc);
124 int ret;
125
126 switch (usb_pipetype(urb->pipe)) {
127 case PIPE_INTERRUPT:
128 ret = pzl_urb_enqueue(whc, urb, mem_flags);
129 break;
130 case PIPE_ISOCHRONOUS:
131 dev_err(&whc->umc->dev, "isochronous transfers unsupported\n");
132 ret = -ENOTSUPP;
133 break;
134 case PIPE_CONTROL:
135 case PIPE_BULK:
136 default:
137 ret = asl_urb_enqueue(whc, urb, mem_flags);
138 break;
139 };
140
141 return ret;
142}
143
144/*
145 * Remove a queued URB from the ASL or PZL.
146 */
147static int whc_urb_dequeue(struct usb_hcd *usb_hcd, struct urb *urb, int status)
148{
149 struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
150 struct whc *whc = wusbhc_to_whc(wusbhc);
151 int ret;
152
153 switch (usb_pipetype(urb->pipe)) {
154 case PIPE_INTERRUPT:
155 ret = pzl_urb_dequeue(whc, urb, status);
156 break;
157 case PIPE_ISOCHRONOUS:
158 ret = -ENOTSUPP;
159 break;
160 case PIPE_CONTROL:
161 case PIPE_BULK:
162 default:
163 ret = asl_urb_dequeue(whc, urb, status);
164 break;
165 };
166
167 return ret;
168}
169
170/*
171 * Wait for all URBs to the endpoint to be completed, then delete the
172 * qset.
173 */
174static void whc_endpoint_disable(struct usb_hcd *usb_hcd,
175 struct usb_host_endpoint *ep)
176{
177 struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
178 struct whc *whc = wusbhc_to_whc(wusbhc);
179 struct whc_qset *qset;
180
181 qset = ep->hcpriv;
182 if (qset) {
183 ep->hcpriv = NULL;
184 if (usb_endpoint_xfer_bulk(&ep->desc)
185 || usb_endpoint_xfer_control(&ep->desc))
186 asl_qset_delete(whc, qset);
187 else
188 pzl_qset_delete(whc, qset);
189 }
190}
191
/*
 * USB core hc_driver operations for the WHCI HC; root hub requests
 * are delegated to the common wusbcore root hub implementation.
 */
static struct hc_driver whc_hc_driver = {
	.description = "whci-hcd",
	.product_desc = "Wireless host controller",
	/* usb_create_hcd() allocates the whole struct whc; usb_hcd is
	 * its first member. */
	.hcd_priv_size = sizeof(struct whc) - sizeof(struct usb_hcd),
	.irq = whc_int_handler,
	.flags = HCD_USB2,

	.reset = whc_reset,
	.start = whc_start,
	.stop = whc_stop,
	.get_frame_number = whc_get_frame_number,
	.urb_enqueue = whc_urb_enqueue,
	.urb_dequeue = whc_urb_dequeue,
	.endpoint_disable = whc_endpoint_disable,

	.hub_status_data = wusbhc_rh_status_data,
	.hub_control = wusbhc_rh_control,
	.bus_suspend = wusbhc_rh_suspend,
	.bus_resume = wusbhc_rh_resume,
	.start_port_reset = wusbhc_rh_start_port_reset,
};
213
214static int whc_probe(struct umc_dev *umc)
215{
216 int ret = -ENOMEM;
217 struct usb_hcd *usb_hcd;
218 struct wusbhc *wusbhc = NULL;
219 struct whc *whc = NULL;
220 struct device *dev = &umc->dev;
221
222 usb_hcd = usb_create_hcd(&whc_hc_driver, dev, "whci");
223 if (usb_hcd == NULL) {
224 dev_err(dev, "unable to create hcd\n");
225 goto error;
226 }
227
228 usb_hcd->wireless = 1;
229
230 wusbhc = usb_hcd_to_wusbhc(usb_hcd);
231 whc = wusbhc_to_whc(wusbhc);
232 whc->umc = umc;
233
234 ret = whc_init(whc);
235 if (ret)
236 goto error;
237
238 wusbhc->dev = dev;
239 wusbhc->uwb_rc = uwb_rc_get_by_grandpa(umc->dev.parent);
240 if (!wusbhc->uwb_rc) {
241 ret = -ENODEV;
242 dev_err(dev, "cannot get radio controller\n");
243 goto error;
244 }
245
246 if (whc->n_devices > USB_MAXCHILDREN) {
247 dev_warn(dev, "USB_MAXCHILDREN too low for WUSB adapter (%u ports)\n",
248 whc->n_devices);
249 wusbhc->ports_max = USB_MAXCHILDREN;
250 } else
251 wusbhc->ports_max = whc->n_devices;
252 wusbhc->mmcies_max = whc->n_mmc_ies;
253 wusbhc->start = whc_wusbhc_start;
254 wusbhc->stop = whc_wusbhc_stop;
255 wusbhc->mmcie_add = whc_mmcie_add;
256 wusbhc->mmcie_rm = whc_mmcie_rm;
257 wusbhc->dev_info_set = whc_dev_info_set;
258 wusbhc->bwa_set = whc_bwa_set;
259 wusbhc->set_num_dnts = whc_set_num_dnts;
260 wusbhc->set_ptk = whc_set_ptk;
261 wusbhc->set_gtk = whc_set_gtk;
262
263 ret = wusbhc_create(wusbhc);
264 if (ret)
265 goto error_wusbhc_create;
266
267 ret = usb_add_hcd(usb_hcd, whc->umc->irq, IRQF_SHARED);
268 if (ret) {
269 dev_err(dev, "cannot add HCD: %d\n", ret);
270 goto error_usb_add_hcd;
271 }
272
273 ret = wusbhc_b_create(wusbhc);
274 if (ret) {
275 dev_err(dev, "WUSBHC phase B setup failed: %d\n", ret);
276 goto error_wusbhc_b_create;
277 }
278
279 return 0;
280
281error_wusbhc_b_create:
282 usb_remove_hcd(usb_hcd);
283error_usb_add_hcd:
284 wusbhc_destroy(wusbhc);
285error_wusbhc_create:
286 uwb_rc_put(wusbhc->uwb_rc);
287error:
288 whc_clean_up(whc);
289 if (usb_hcd)
290 usb_put_hcd(usb_hcd);
291 return ret;
292}
293
294
295static void whc_remove(struct umc_dev *umc)
296{
297 struct usb_hcd *usb_hcd = dev_get_drvdata(&umc->dev);
298 struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
299 struct whc *whc = wusbhc_to_whc(wusbhc);
300
301 if (usb_hcd) {
302 wusbhc_b_destroy(wusbhc);
303 usb_remove_hcd(usb_hcd);
304 wusbhc_destroy(wusbhc);
305 uwb_rc_put(wusbhc->uwb_rc);
306 whc_clean_up(whc);
307 usb_put_hcd(usb_hcd);
308 }
309}
310
/* UMC driver binding for the WHCI Wireless USB HC capability. */
static struct umc_driver whci_hc_driver = {
	.name = "whci-hcd",
	.cap_id = UMC_CAP_ID_WHCI_WUSB_HC,
	.probe = whc_probe,
	.remove = whc_remove,
};
317
/* Module entry point: register with the UMC bus. */
static int __init whci_hc_driver_init(void)
{
	return umc_driver_register(&whci_hc_driver);
}
module_init(whci_hc_driver_init);
323
/* Module exit point: unregister from the UMC bus. */
static void __exit whci_hc_driver_exit(void)
{
	umc_driver_unregister(&whci_hc_driver);
}
module_exit(whci_hc_driver_exit);
329
/* PCI device ID's that we handle (so it gets loaded) */
static struct pci_device_id whci_hcd_id_table[] = {
	/* Match by PCI class only: any WHCI wireless controller. */
	{ PCI_DEVICE_CLASS(PCI_CLASS_WIRELESS_WHCI, ~0) },
	{ /* empty last entry */ }
};
MODULE_DEVICE_TABLE(pci, whci_hcd_id_table);
336
337MODULE_DESCRIPTION("WHCI Wireless USB host controller driver");
338MODULE_AUTHOR("Cambridge Silicon Radio Ltd.");
339MODULE_LICENSE("GPL");
diff --git a/drivers/usb/host/whci/hw.c b/drivers/usb/host/whci/hw.c
new file mode 100644
index 000000000000..ac86e59c1225
--- /dev/null
+++ b/drivers/usb/host/whci/hw.c
@@ -0,0 +1,87 @@
1/*
2 * Wireless Host Controller (WHC) hardware access helpers.
3 *
4 * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License version
8 * 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18#include <linux/kernel.h>
19#include <linux/dma-mapping.h>
20#include <linux/uwb/umc.h>
21
22#include "../../wusbcore/wusbhc.h"
23
24#include "whcd.h"
25
26void whc_write_wusbcmd(struct whc *whc, u32 mask, u32 val)
27{
28 unsigned long flags;
29 u32 cmd;
30
31 spin_lock_irqsave(&whc->lock, flags);
32
33 cmd = le_readl(whc->base + WUSBCMD);
34 cmd = (cmd & ~mask) | val;
35 le_writel(cmd, whc->base + WUSBCMD);
36
37 spin_unlock_irqrestore(&whc->lock, flags);
38}
39
40/**
41 * whc_do_gencmd - start a generic command via the WUSBGENCMDSTS register
42 * @whc: the WHCI HC
43 * @cmd: command to start.
44 * @params: parameters for the command (the WUSBGENCMDPARAMS register value).
45 * @addr: pointer to any data for the command (may be NULL).
46 * @len: length of the data (if any).
47 */
48int whc_do_gencmd(struct whc *whc, u32 cmd, u32 params, void *addr, size_t len)
49{
50 unsigned long flags;
51 dma_addr_t dma_addr;
52 int t;
53
54 mutex_lock(&whc->mutex);
55
56 /* Wait for previous command to complete. */
57 t = wait_event_timeout(whc->cmd_wq,
58 (le_readl(whc->base + WUSBGENCMDSTS) & WUSBGENCMDSTS_ACTIVE) == 0,
59 WHC_GENCMD_TIMEOUT_MS);
60 if (t == 0) {
61 dev_err(&whc->umc->dev, "generic command timeout (%04x/%04x)\n",
62 le_readl(whc->base + WUSBGENCMDSTS),
63 le_readl(whc->base + WUSBGENCMDPARAMS));
64 return -ETIMEDOUT;
65 }
66
67 if (addr) {
68 memcpy(whc->gen_cmd_buf, addr, len);
69 dma_addr = whc->gen_cmd_buf_dma;
70 } else
71 dma_addr = 0;
72
73 /* Poke registers to start cmd. */
74 spin_lock_irqsave(&whc->lock, flags);
75
76 le_writel(params, whc->base + WUSBGENCMDPARAMS);
77 le_writeq(dma_addr, whc->base + WUSBGENADDR);
78
79 le_writel(WUSBGENCMDSTS_ACTIVE | WUSBGENCMDSTS_IOC | cmd,
80 whc->base + WUSBGENCMDSTS);
81
82 spin_unlock_irqrestore(&whc->lock, flags);
83
84 mutex_unlock(&whc->mutex);
85
86 return 0;
87}
diff --git a/drivers/usb/host/whci/init.c b/drivers/usb/host/whci/init.c
new file mode 100644
index 000000000000..34a783cb0133
--- /dev/null
+++ b/drivers/usb/host/whci/init.c
@@ -0,0 +1,188 @@
1/*
2 * Wireless Host Controller (WHC) initialization.
3 *
4 * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License version
8 * 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18#include <linux/kernel.h>
19#include <linux/dma-mapping.h>
20#include <linux/uwb/umc.h>
21
22#include "../../wusbcore/wusbhc.h"
23
24#include "whcd.h"
25
/*
 * Reset the host controller.
 *
 * Writes the WHCRESET bit and waits (up to 100 ms) for the HC to
 * clear it again, signalling reset completion.
 */
static void whc_hw_reset(struct whc *whc)
{
	le_writel(WUSBCMD_WHCRESET, whc->base + WUSBCMD);
	whci_wait_for(&whc->umc->dev, whc->base + WUSBCMD, WUSBCMD_WHCRESET, 0,
		      100, "reset");
}
35
36static void whc_hw_init_di_buf(struct whc *whc)
37{
38 int d;
39
40 /* Disable all entries in the Device Information buffer. */
41 for (d = 0; d < whc->n_devices; d++)
42 whc->di_buf[d].addr_sec_info = WHC_DI_DISABLE;
43
44 le_writeq(whc->di_buf_dma, whc->base + WUSBDEVICEINFOADDR);
45}
46
47static void whc_hw_init_dn_buf(struct whc *whc)
48{
49 /* Clear the Device Notification buffer to ensure the V (valid)
50 * bits are clear. */
51 memset(whc->dn_buf, 0, 4096);
52
53 le_writeq(whc->dn_buf_dma, whc->base + WUSBDNTSBUFADDR);
54}
55
/*
 * whc_init - one-time setup of a WHC instance
 *
 * Initializes locks, wait queues, work items and the ASL/PZL lists,
 * maps and resets the HC's register region, reads its capability
 * parameters, and allocates the DMA buffers (qset pool, generic
 * command buffer, Device Information and Device Notification buffers).
 *
 * On any failure this calls whc_clean_up() itself before returning a
 * negative errno, so callers must not clean up again.
 */
int whc_init(struct whc *whc)
{
	u32 whcsparams;
	int ret, i;
	resource_size_t start, len;

	spin_lock_init(&whc->lock);
	mutex_init(&whc->mutex);
	init_waitqueue_head(&whc->cmd_wq);
	init_waitqueue_head(&whc->async_list_wq);
	init_waitqueue_head(&whc->periodic_list_wq);
	whc->workqueue = create_singlethread_workqueue(dev_name(&whc->umc->dev));
	if (whc->workqueue == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	INIT_WORK(&whc->dn_work, whc_dn_work);

	INIT_WORK(&whc->async_work, scan_async_work);
	INIT_LIST_HEAD(&whc->async_list);
	INIT_LIST_HEAD(&whc->async_removed_list);

	INIT_WORK(&whc->periodic_work, scan_periodic_work);
	/* Five periodic schedule lists, one per period (see pzl.c). */
	for (i = 0; i < 5; i++)
		INIT_LIST_HEAD(&whc->periodic_list[i]);
	INIT_LIST_HEAD(&whc->periodic_removed_list);

	/* Map HC registers. */
	start = whc->umc->resource.start;
	len = whc->umc->resource.end - start + 1;
	if (!request_mem_region(start, len, "whci-hc")) {
		dev_err(&whc->umc->dev, "can't request HC region\n");
		ret = -EBUSY;
		goto error;
	}
	whc->base_phys = start;
	whc->base = ioremap(start, len);
	if (!whc->base) {
		dev_err(&whc->umc->dev, "ioremap\n");
		ret = -ENOMEM;
		goto error;
	}

	whc_hw_reset(whc);

	/* Read maximum number of devices, keys and MMC IEs. */
	whcsparams = le_readl(whc->base + WHCSPARAMS);
	whc->n_devices = WHCSPARAMS_TO_N_DEVICES(whcsparams);
	whc->n_keys = WHCSPARAMS_TO_N_KEYS(whcsparams);
	whc->n_mmc_ies = WHCSPARAMS_TO_N_MMC_IES(whcsparams);

	dev_dbg(&whc->umc->dev, "N_DEVICES = %d, N_KEYS = %d, N_MMC_IES = %d\n",
		whc->n_devices, whc->n_keys, whc->n_mmc_ies);

	/* qsets are DMA'd by the HC; 64-byte alignment per pool args. */
	whc->qset_pool = dma_pool_create("qset", &whc->umc->dev,
					 sizeof(struct whc_qset), 64, 0);
	if (whc->qset_pool == NULL) {
		ret = -ENOMEM;
		goto error;
	}

	ret = asl_init(whc);
	if (ret < 0)
		goto error;
	ret = pzl_init(whc);
	if (ret < 0)
		goto error;

	/* Allocate and initialize a buffer for generic commands, the
	   Device Information buffer, and the Device Notification
	   buffer. */

	whc->gen_cmd_buf = dma_alloc_coherent(&whc->umc->dev, WHC_GEN_CMD_DATA_LEN,
					      &whc->gen_cmd_buf_dma, GFP_KERNEL);
	if (whc->gen_cmd_buf == NULL) {
		ret = -ENOMEM;
		goto error;
	}

	whc->dn_buf = dma_alloc_coherent(&whc->umc->dev,
					 sizeof(struct dn_buf_entry) * WHC_N_DN_ENTRIES,
					 &whc->dn_buf_dma, GFP_KERNEL);
	if (!whc->dn_buf) {
		ret = -ENOMEM;
		goto error;
	}
	whc_hw_init_dn_buf(whc);

	whc->di_buf = dma_alloc_coherent(&whc->umc->dev,
					 sizeof(struct di_buf_entry) * whc->n_devices,
					 &whc->di_buf_dma, GFP_KERNEL);
	if (!whc->di_buf) {
		ret = -ENOMEM;
		goto error;
	}
	whc_hw_init_di_buf(whc);

	return 0;

error:
	whc_clean_up(whc);
	return ret;
}
159
/*
 * Release everything whc_init() set up, in reverse order.  Safe to
 * call on a partially-initialized whc: each resource is checked
 * before being released.
 */
void whc_clean_up(struct whc *whc)
{
	resource_size_t len;

	if (whc->di_buf)
		dma_free_coherent(&whc->umc->dev, sizeof(struct di_buf_entry) * whc->n_devices,
				  whc->di_buf, whc->di_buf_dma);
	if (whc->dn_buf)
		dma_free_coherent(&whc->umc->dev, sizeof(struct dn_buf_entry) * WHC_N_DN_ENTRIES,
				  whc->dn_buf, whc->dn_buf_dma);
	if (whc->gen_cmd_buf)
		dma_free_coherent(&whc->umc->dev, WHC_GEN_CMD_DATA_LEN,
				  whc->gen_cmd_buf, whc->gen_cmd_buf_dma);

	pzl_clean_up(whc);
	asl_clean_up(whc);

	if (whc->qset_pool)
		dma_pool_destroy(whc->qset_pool);

	len = whc->umc->resource.end - whc->umc->resource.start + 1;
	if (whc->base)
		iounmap(whc->base);
	if (whc->base_phys)
		release_mem_region(whc->base_phys, len);

	if (whc->workqueue)
		destroy_workqueue(whc->workqueue);
}
diff --git a/drivers/usb/host/whci/int.c b/drivers/usb/host/whci/int.c
new file mode 100644
index 000000000000..fce01174aa9b
--- /dev/null
+++ b/drivers/usb/host/whci/int.c
@@ -0,0 +1,95 @@
1/*
2 * Wireless Host Controller (WHC) interrupt handling.
3 *
4 * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License version
8 * 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18#include <linux/version.h>
19#include <linux/kernel.h>
20#include <linux/init.h>
21#include <linux/uwb/umc.h>
22
23#include "../../wusbcore/wusbhc.h"
24
25#include "whcd.h"
26
/* A transfer completed: kick both schedule scanners to reap qTDs. */
static void transfer_done(struct whc *whc)
{
	queue_work(whc->workqueue, &whc->async_work);
	queue_work(whc->workqueue, &whc->periodic_work);
}
32
/*
 * Top-half interrupt handler: acknowledge all asserted status bits,
 * wake any waiters, and defer real work (device notifications,
 * completed transfers) to the workqueue.
 */
irqreturn_t whc_int_handler(struct usb_hcd *hcd)
{
	struct wusbhc *wusbhc = usb_hcd_to_wusbhc(hcd);
	struct whc *whc = wusbhc_to_whc(wusbhc);
	u32 sts;

	sts = le_readl(whc->base + WUSBSTS);
	if (!(sts & WUSBSTS_INT_MASK))
		return IRQ_NONE;	/* not our interrupt (shared line) */
	/* Write-1-to-clear the bits we are about to handle. */
	le_writel(sts & WUSBSTS_INT_MASK, whc->base + WUSBSTS);

	if (sts & WUSBSTS_GEN_CMD_DONE)
		wake_up(&whc->cmd_wq);

	if (sts & WUSBSTS_HOST_ERR)
		dev_err(&whc->umc->dev, "FIXME: host system error\n");

	if (sts & WUSBSTS_ASYNC_SCHED_SYNCED)
		wake_up(&whc->async_list_wq);

	if (sts & WUSBSTS_PERIODIC_SCHED_SYNCED)
		wake_up(&whc->periodic_list_wq);

	if (sts & WUSBSTS_DNTS_INT)
		queue_work(whc->workqueue, &whc->dn_work);

	/*
	 * A transfer completed (see [WHCI] section 4.7.1.2 for when
	 * this occurs).
	 */
	if (sts & (WUSBSTS_INT | WUSBSTS_ERR_INT))
		transfer_done(whc);

	return IRQ_HANDLED;
}
68
69static int process_dn_buf(struct whc *whc)
70{
71 struct wusbhc *wusbhc = &whc->wusbhc;
72 struct dn_buf_entry *dn;
73 int processed = 0;
74
75 for (dn = whc->dn_buf; dn < whc->dn_buf + WHC_N_DN_ENTRIES; dn++) {
76 if (dn->status & WHC_DN_STATUS_VALID) {
77 wusbhc_handle_dn(wusbhc, dn->src_addr,
78 (struct wusb_dn_hdr *)dn->dn_data,
79 dn->msg_size);
80 dn->status &= ~WHC_DN_STATUS_VALID;
81 processed++;
82 }
83 }
84 return processed;
85}
86
87void whc_dn_work(struct work_struct *work)
88{
89 struct whc *whc = container_of(work, struct whc, dn_work);
90 int processed;
91
92 do {
93 processed = process_dn_buf(whc);
94 } while (processed);
95}
diff --git a/drivers/usb/host/whci/pzl.c b/drivers/usb/host/whci/pzl.c
new file mode 100644
index 000000000000..8d62df0c330b
--- /dev/null
+++ b/drivers/usb/host/whci/pzl.c
@@ -0,0 +1,398 @@
1/*
2 * Wireless Host Controller (WHC) periodic schedule management.
3 *
4 * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License version
8 * 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18#include <linux/kernel.h>
19#include <linux/dma-mapping.h>
20#include <linux/uwb/umc.h>
21#include <linux/usb.h>
22#define D_LOCAL 0
23#include <linux/uwb/debug.h>
24
25#include "../../wusbcore/wusbhc.h"
26
27#include "whcd.h"
28
#if D_LOCAL >= 4
/* Debug helper: dump every qset in each of the five periodic lists. */
static void dump_pzl(struct whc *whc, const char *tag)
{
	struct device *dev = &whc->umc->dev;
	struct whc_qset *qset;
	int period = 0;

	d_printf(4, dev, "PZL %s\n", tag);

	for (period = 0; period < 5; period++) {
		d_printf(4, dev, "Period %d\n", period);
		list_for_each_entry(qset, &whc->periodic_list[period], list_node) {
			dump_qset(qset, dev);
		}
	}
}
#else
/* Debug dump compiled out when D_LOCAL < 4. */
static inline void dump_pzl(struct whc *whc, const char *tag)
{
}
#endif
50
51static void update_pzl_pointers(struct whc *whc, int period, u64 addr)
52{
53 switch (period) {
54 case 0:
55 whc_qset_set_link_ptr(&whc->pz_list[0], addr);
56 whc_qset_set_link_ptr(&whc->pz_list[2], addr);
57 whc_qset_set_link_ptr(&whc->pz_list[4], addr);
58 whc_qset_set_link_ptr(&whc->pz_list[6], addr);
59 whc_qset_set_link_ptr(&whc->pz_list[8], addr);
60 whc_qset_set_link_ptr(&whc->pz_list[10], addr);
61 whc_qset_set_link_ptr(&whc->pz_list[12], addr);
62 whc_qset_set_link_ptr(&whc->pz_list[14], addr);
63 break;
64 case 1:
65 whc_qset_set_link_ptr(&whc->pz_list[1], addr);
66 whc_qset_set_link_ptr(&whc->pz_list[5], addr);
67 whc_qset_set_link_ptr(&whc->pz_list[9], addr);
68 whc_qset_set_link_ptr(&whc->pz_list[13], addr);
69 break;
70 case 2:
71 whc_qset_set_link_ptr(&whc->pz_list[3], addr);
72 whc_qset_set_link_ptr(&whc->pz_list[11], addr);
73 break;
74 case 3:
75 whc_qset_set_link_ptr(&whc->pz_list[7], addr);
76 break;
77 case 4:
78 whc_qset_set_link_ptr(&whc->pz_list[15], addr);
79 break;
80 }
81}
82
83/*
84 * Return the 'period' to use for this qset. The minimum interval for
85 * the endpoint is used so whatever urbs are submitted the device is
86 * polled often enough.
87 */
88static int qset_get_period(struct whc *whc, struct whc_qset *qset)
89{
90 uint8_t bInterval = qset->ep->desc.bInterval;
91
92 if (bInterval < 6)
93 bInterval = 6;
94 if (bInterval > 10)
95 bInterval = 10;
96 return bInterval - 6;
97}
98
/*
 * Reset a qset and move it onto the software list for its period.
 * The hardware's view is updated later by scan_periodic_work().
 */
static void qset_insert_in_sw_list(struct whc *whc, struct whc_qset *qset)
{
	int period;

	period = qset_get_period(whc, qset);

	qset_clear(whc, qset);
	list_move(&qset->list_node, &whc->periodic_list[period]);
	qset->in_sw_list = true;
}
109
110static void pzl_qset_remove(struct whc *whc, struct whc_qset *qset)
111{
112 list_move(&qset->list_node, &whc->periodic_removed_list);
113 qset->in_hw_list = false;
114 qset->in_sw_list = false;
115}
116
117/**
118 * pzl_process_qset - process any recently inactivated or halted qTDs
119 * in a qset.
120 *
121 * After inactive qTDs are removed, new qTDs can be added if the
122 * urb queue still contains URBs.
123 *
124 * Returns the schedule updates required.
125 */
126static enum whc_update pzl_process_qset(struct whc *whc, struct whc_qset *qset)
127{
128 enum whc_update update = 0;
129 uint32_t status = 0;
130
131 while (qset->ntds) {
132 struct whc_qtd *td;
133 int t;
134
135 t = qset->td_start;
136 td = &qset->qtd[qset->td_start];
137 status = le32_to_cpu(td->status);
138
139 /*
140 * Nothing to do with a still active qTD.
141 */
142 if (status & QTD_STS_ACTIVE)
143 break;
144
145 if (status & QTD_STS_HALTED) {
146 /* Ug, an error. */
147 process_halted_qtd(whc, qset, td);
148 goto done;
149 }
150
151 /* Mmm, a completed qTD. */
152 process_inactive_qtd(whc, qset, td);
153 }
154
155 update |= qset_add_qtds(whc, qset);
156
157done:
158 /*
159 * If there are no qTDs in this qset, remove it from the PZL.
160 */
161 if (qset->remove && qset->ntds == 0) {
162 pzl_qset_remove(whc, qset);
163 update |= WHC_UPDATE_REMOVED;
164 }
165
166 return update;
167}
168
/**
 * pzl_start - start the periodic schedule
 * @whc: the WHCI host controller
 *
 * The PZL must be valid (e.g., all entries in the list should have
 * the T bit set).
 */
void pzl_start(struct whc *whc)
{
	/* Point the controller at the PZL before enabling the schedule. */
	le_writeq(whc->pz_list_dma, whc->base + WUSBPERIODICLISTBASE);

	whc_write_wusbcmd(whc, WUSBCMD_PERIODIC_EN, WUSBCMD_PERIODIC_EN);
	/* Wait for the controller to report the schedule is running. */
	whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS,
		      WUSBSTS_PERIODIC_SCHED, WUSBSTS_PERIODIC_SCHED,
		      1000, "start PZL");
}
185
/**
 * pzl_stop - stop the periodic schedule
 * @whc: the WHCI host controller
 *
 * Disables the schedule and waits for the controller to report that
 * it has stopped.
 */
void pzl_stop(struct whc *whc)
{
	whc_write_wusbcmd(whc, WUSBCMD_PERIODIC_EN, 0);
	whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS,
		      WUSBSTS_PERIODIC_SCHED, 0,
		      1000, "stop PZL");
}
197
/**
 * pzl_update - request a PZL update and wait until the hardware is synced
 * @whc: the WHCI host controller
 * @wusbcmd: WUSBCMD_PERIODIC_* bits to set
 *
 * Sleeps until the interrupt handler wakes periodic_list_wq and the
 * controller has cleared WUSBCMD_PERIODIC_UPDATED.  Must be called
 * from process context.
 */
void pzl_update(struct whc *whc, uint32_t wusbcmd)
{
	whc_write_wusbcmd(whc, wusbcmd, wusbcmd);
	wait_event(whc->periodic_list_wq,
		   (le_readl(whc->base + WUSBCMD) & WUSBCMD_PERIODIC_UPDATED) == 0);
}
204
/*
 * Rebuild the hardware's view of the PZL from the software lists.
 *
 * Each qset is linked to the previously visited one and the PZL slots
 * for a period are pointed at the last qset visited for that period.
 * NOTE(review): tmp_qh is deliberately NOT reset between periods, so
 * a period's chain continues into the previous period's qsets —
 * confirm against the [WHCI] PZL layout.
 */
static void update_pzl_hw_view(struct whc *whc)
{
	struct whc_qset *qset, *t;
	int period;
	u64 tmp_qh = 0;

	for (period = 0; period < 5; period++) {
		list_for_each_entry_safe(qset, t, &whc->periodic_list[period], list_node) {
			whc_qset_set_link_ptr(&qset->qh.link, tmp_qh);
			tmp_qh = qset->qset_dma;
			qset->in_hw_list = true;
		}
		update_pzl_pointers(whc, period, tmp_qh);
	}
}
220
221/**
222 * scan_periodic_work - scan the PZL for qsets to process.
223 *
224 * Process each qset in the PZL in turn and then signal the WHC that
225 * the PZL has been updated.
226 *
227 * Then start, stop or update the periodic schedule as required.
228 */
229void scan_periodic_work(struct work_struct *work)
230{
231 struct whc *whc = container_of(work, struct whc, periodic_work);
232 struct whc_qset *qset, *t;
233 enum whc_update update = 0;
234 int period;
235
236 spin_lock_irq(&whc->lock);
237
238 dump_pzl(whc, "before processing");
239
240 for (period = 4; period >= 0; period--) {
241 list_for_each_entry_safe(qset, t, &whc->periodic_list[period], list_node) {
242 if (!qset->in_hw_list)
243 update |= WHC_UPDATE_ADDED;
244 update |= pzl_process_qset(whc, qset);
245 }
246 }
247
248 if (update & (WHC_UPDATE_ADDED | WHC_UPDATE_REMOVED))
249 update_pzl_hw_view(whc);
250
251 dump_pzl(whc, "after processing");
252
253 spin_unlock_irq(&whc->lock);
254
255 if (update) {
256 uint32_t wusbcmd = WUSBCMD_PERIODIC_UPDATED | WUSBCMD_PERIODIC_SYNCED_DB;
257 if (update & WHC_UPDATE_REMOVED)
258 wusbcmd |= WUSBCMD_PERIODIC_QSET_RM;
259 pzl_update(whc, wusbcmd);
260 }
261
262 /*
263 * Now that the PZL is updated, complete the removal of any
264 * removed qsets.
265 */
266 spin_lock(&whc->lock);
267
268 list_for_each_entry_safe(qset, t, &whc->periodic_removed_list, list_node) {
269 qset_remove_complete(whc, qset);
270 }
271
272 spin_unlock(&whc->lock);
273}
274
275/**
276 * pzl_urb_enqueue - queue an URB onto the periodic list (PZL)
277 * @whc: the WHCI host controller
278 * @urb: the URB to enqueue
279 * @mem_flags: flags for any memory allocations
280 *
281 * The qset for the endpoint is obtained and the urb queued on to it.
282 *
283 * Work is scheduled to update the hardware's view of the PZL.
284 */
285int pzl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags)
286{
287 struct whc_qset *qset;
288 int err;
289 unsigned long flags;
290
291 spin_lock_irqsave(&whc->lock, flags);
292
293 qset = get_qset(whc, urb, GFP_ATOMIC);
294 if (qset == NULL)
295 err = -ENOMEM;
296 else
297 err = qset_add_urb(whc, qset, urb, GFP_ATOMIC);
298 if (!err) {
299 usb_hcd_link_urb_to_ep(&whc->wusbhc.usb_hcd, urb);
300 if (!qset->in_sw_list)
301 qset_insert_in_sw_list(whc, qset);
302 }
303
304 spin_unlock_irqrestore(&whc->lock, flags);
305
306 if (!err)
307 queue_work(whc->workqueue, &whc->periodic_work);
308
309 return 0;
310}
311
/**
 * pzl_urb_dequeue - remove an URB (qset) from the periodic list
 * @whc: the WHCI host controller
 * @urb: the URB to dequeue
 * @status: the current status of the URB
 *
 * URBs that do yet have qTDs can simply be removed from the software
 * queue, otherwise the qset must be removed so the qTDs can be safely
 * removed.
 */
int pzl_urb_dequeue(struct whc *whc, struct urb *urb, int status)
{
	struct whc_urb *wurb = urb->hcpriv;
	struct whc_qset *qset = wurb->qset;
	struct whc_std *std, *t;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&whc->lock, flags);

	/* Let the USB core veto the unlink (e.g. urb already given back). */
	ret = usb_hcd_check_unlink_urb(&whc->wusbhc.usb_hcd, urb, status);
	if (ret < 0)
		goto out;

	/* Free this urb's sTDs; detach the others from their qTDs so
	 * they are re-queued when the qset is re-added. */
	list_for_each_entry_safe(std, t, &qset->stds, list_node) {
		if (std->urb == urb)
			qset_free_std(whc, std);
		else
			std->qtd = NULL; /* so this std is re-added when the qset is */
	}

	/* The actual giveback (and schedule sync) happens in
	 * urb_dequeue_work(), which sleeps. */
	pzl_qset_remove(whc, qset);
	wurb->status = status;
	wurb->is_async = false;
	queue_work(whc->workqueue, &wurb->dequeue_work);

out:
	spin_unlock_irqrestore(&whc->lock, flags);

	return ret;
}
353
/**
 * pzl_qset_delete - delete a qset from the PZL
 * @whc: the WHCI host controller
 * @qset: the qset to delete
 *
 * Flags the qset for removal, kicks the scan work to detach it from
 * the schedule, then blocks until the removal completes and frees it
 * (qset_delete() sleeps, so process context only).
 */
void pzl_qset_delete(struct whc *whc, struct whc_qset *qset)
{
	qset->remove = 1;
	queue_work(whc->workqueue, &whc->periodic_work);
	qset_delete(whc, qset);
}
363
364
/**
 * pzl_init - initialize the periodic zone list
 * @whc: the WHCI host controller
 *
 * Allocates the 16-entry PZL as coherent DMA memory, marks every
 * entry empty (T bit set) and tells the controller where it lives.
 *
 * Returns 0 on success or -ENOMEM.
 */
int pzl_init(struct whc *whc)
{
	int i;

	whc->pz_list = dma_alloc_coherent(&whc->umc->dev, sizeof(u64) * 16,
					  &whc->pz_list_dma, GFP_KERNEL);
	if (whc->pz_list == NULL)
		return -ENOMEM;

	/* Set T bit on all elements in PZL. */
	for (i = 0; i < 16; i++)
		whc->pz_list[i] = cpu_to_le64(QH_LINK_NTDS(8) | QH_LINK_T);

	le_writeq(whc->pz_list_dma, whc->base + WUSBPERIODICLISTBASE);

	return 0;
}
386
387/**
388 * pzl_clean_up - free PZL resources
389 * @whc: the WHCI host controller
390 *
391 * The PZL is stopped and empty.
392 */
393void pzl_clean_up(struct whc *whc)
394{
395 if (whc->pz_list)
396 dma_free_coherent(&whc->umc->dev, sizeof(u64) * 16, whc->pz_list,
397 whc->pz_list_dma);
398}
diff --git a/drivers/usb/host/whci/qset.c b/drivers/usb/host/whci/qset.c
new file mode 100644
index 000000000000..0420037d2e18
--- /dev/null
+++ b/drivers/usb/host/whci/qset.c
@@ -0,0 +1,567 @@
1/*
2 * Wireless Host Controller (WHC) qset management.
3 *
4 * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License version
8 * 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18#include <linux/kernel.h>
19#include <linux/dma-mapping.h>
20#include <linux/uwb/umc.h>
21#include <linux/usb.h>
22
23#include "../../wusbcore/wusbhc.h"
24
25#include "whcd.h"
26
/*
 * dump_qset - debug dump of a qset: the QHead state, every hardware
 * qTD slot ('S'/'E' mark the td_start/td_end window) and the queued
 * sTDs grouped by urb.
 */
void dump_qset(struct whc_qset *qset, struct device *dev)
{
	struct whc_std *std;
	struct urb *urb = NULL;
	int i;

	dev_dbg(dev, "qset %08x\n", (u32)qset->qset_dma);
	dev_dbg(dev, "  -> %08x\n", (u32)qset->qh.link);
	dev_dbg(dev, "  info: %08x %08x %08x\n",
		qset->qh.info1, qset->qh.info2, qset->qh.info3);
	dev_dbg(dev, "  sts: %04x errs: %d\n", qset->qh.status, qset->qh.err_count);
	dev_dbg(dev, "  TD: sts: %08x opts: %08x\n",
		qset->qh.overlay.qtd.status, qset->qh.overlay.qtd.options);

	for (i = 0; i < WHCI_QSET_TD_MAX; i++) {
		dev_dbg(dev, "  %c%c TD[%d]: sts: %08x opts: %08x ptr: %08x\n",
			i == qset->td_start ? 'S' : ' ',
			i == qset->td_end ? 'E' : ' ',
			i, qset->qtd[i].status, qset->qtd[i].options,
			(u32)qset->qtd[i].page_list_ptr);
	}
	dev_dbg(dev, "  ntds: %d\n", qset->ntds);
	list_for_each_entry(std, &qset->stds, list_node) {
		if (urb != std->urb) {
			urb = std->urb;
			dev_dbg(dev, "  urb %p transferred: %d bytes\n", urb,
				urb->actual_length);
		}
		if (std->qtd)
			dev_dbg(dev, "    sTD[%td]: %zu bytes @ %08x\n",
				std->qtd - &qset->qtd[0],
				std->len, std->num_pointers ?
				(u32)(std->pl_virt[0].buf_ptr) : (u32)std->dma_addr);
		else
			dev_dbg(dev, "    sTD[-]: %zd bytes @ %08x\n",
				std->len, std->num_pointers ?
				(u32)(std->pl_virt[0].buf_ptr) : (u32)std->dma_addr);
	}
}
66
/*
 * qset_alloc - allocate and minimally initialise a qset from the
 * controller's DMA pool.
 *
 * The QHead is filled in later by qset_fill_qh() once an urb for the
 * endpoint is seen.  Returns NULL on allocation failure.
 */
struct whc_qset *qset_alloc(struct whc *whc, gfp_t mem_flags)
{
	struct whc_qset *qset;
	dma_addr_t dma;

	qset = dma_pool_alloc(whc->qset_pool, mem_flags, &dma);
	if (qset == NULL)
		return NULL;
	/* dma_pool_alloc() does not zero the memory. */
	memset(qset, 0, sizeof(struct whc_qset));

	qset->qset_dma = dma;
	qset->whc = whc;

	INIT_LIST_HEAD(&qset->list_node);
	INIT_LIST_HEAD(&qset->stds);

	return qset;
}
85
/**
 * qset_fill_qh - fill the static endpoint state in a qset's QHead
 * @qset: the qset whose QH needs initializing with static endpoint
 *        state
 * @urb: an urb for a transfer to this endpoint
 */
static void qset_fill_qh(struct whc_qset *qset, struct urb *urb)
{
	struct usb_device *usb_dev = urb->dev;
	struct usb_wireless_ep_comp_descriptor *epcd;
	bool is_out;

	is_out = usb_pipeout(urb->pipe);

	epcd = (struct usb_wireless_ep_comp_descriptor *)qset->ep->extra;

	if (epcd) {
		qset->max_seq = epcd->bMaxSequence;
		qset->max_burst = epcd->bMaxBurst;
	} else {
		/* No wireless ep companion descriptor: fall back to
		 * minimal values. */
		qset->max_seq = 2;
		qset->max_burst = 1;
	}

	qset->qh.info1 = cpu_to_le32(
		QH_INFO1_EP(usb_pipeendpoint(urb->pipe))
		| (is_out ? QH_INFO1_DIR_OUT : QH_INFO1_DIR_IN)
		| usb_pipe_to_qh_type(urb->pipe)
		| QH_INFO1_DEV_INFO_IDX(wusb_port_no_to_idx(usb_dev->portnum))
		| QH_INFO1_MAX_PKT_LEN(usb_maxpacket(urb->dev, urb->pipe, is_out))
		);
	qset->qh.info2 = cpu_to_le32(
		QH_INFO2_BURST(qset->max_burst)
		| QH_INFO2_DBP(0)
		| QH_INFO2_MAX_COUNT(3)
		| QH_INFO2_MAX_RETRY(3)
		| QH_INFO2_MAX_SEQ(qset->max_seq - 1)
		);
	/* FIXME: where can we obtain these Tx parameters from?  Why
	 * doesn't the chip know what Tx power to use? It knows the Rx
	 * strength and can presumably guess the Tx power required
	 * from that? */
	qset->qh.info3 = cpu_to_le32(
		QH_INFO3_TX_RATE_53_3
		| QH_INFO3_TX_PWR(0) /* 0 == max power */
		);
}
133
/**
 * qset_clear - clear fields in a qset so it may be reinserted into a
 * schedule
 */
void qset_clear(struct whc *whc, struct whc_qset *qset)
{
	/* Empty the circular qTD window; iCur starts at slot 0. */
	qset->td_start = qset->td_end = qset->ntds = 0;
	qset->remove = 0;

	/* An empty qset terminates its chain (T bit set). */
	qset->qh.link = cpu_to_le32(QH_LINK_NTDS(8) | QH_LINK_T);
	qset->qh.status = cpu_to_le16(QH_STATUS_ICUR(qset->td_start));
	qset->qh.err_count = 0;
	qset->qh.cur_window = cpu_to_le32((1 << qset->max_burst) - 1);
	qset->qh.scratch[0] = 0;
	qset->qh.scratch[1] = 0;
	qset->qh.scratch[2] = 0;

	memset(&qset->qh.overlay, 0, sizeof(qset->qh.overlay));

	init_completion(&qset->remove_complete);
}
155
156/**
157 * get_qset - get the qset for an async endpoint
158 *
159 * A new qset is created if one does not already exist.
160 */
161struct whc_qset *get_qset(struct whc *whc, struct urb *urb,
162 gfp_t mem_flags)
163{
164 struct whc_qset *qset;
165
166 qset = urb->ep->hcpriv;
167 if (qset == NULL) {
168 qset = qset_alloc(whc, mem_flags);
169 if (qset == NULL)
170 return NULL;
171
172 qset->ep = urb->ep;
173 urb->ep->hcpriv = qset;
174 qset_fill_qh(qset, urb);
175 }
176 return qset;
177}
178
/*
 * The hardware has finished with a removed qset: unlink it and wake
 * anyone sleeping in qset_delete().
 */
void qset_remove_complete(struct whc *whc, struct whc_qset *qset)
{
	list_del_init(&qset->list_node);
	complete(&qset->remove_complete);
}
184
185/**
186 * qset_add_qtds - add qTDs for an URB to a qset
187 *
188 * Returns true if the list (ASL/PZL) must be updated because (for a
189 * WHCI 0.95 controller) an activated qTD was pointed to be iCur.
190 */
191enum whc_update qset_add_qtds(struct whc *whc, struct whc_qset *qset)
192{
193 struct whc_std *std;
194 enum whc_update update = 0;
195
196 list_for_each_entry(std, &qset->stds, list_node) {
197 struct whc_qtd *qtd;
198 uint32_t status;
199
200 if (qset->ntds >= WHCI_QSET_TD_MAX
201 || (qset->pause_after_urb && std->urb != qset->pause_after_urb))
202 break;
203
204 if (std->qtd)
205 continue; /* already has a qTD */
206
207 qtd = std->qtd = &qset->qtd[qset->td_end];
208
209 /* Fill in setup bytes for control transfers. */
210 if (usb_pipecontrol(std->urb->pipe))
211 memcpy(qtd->setup, std->urb->setup_packet, 8);
212
213 status = QTD_STS_ACTIVE | QTD_STS_LEN(std->len);
214
215 if (whc_std_last(std) && usb_pipeout(std->urb->pipe))
216 status |= QTD_STS_LAST_PKT;
217
218 /*
219 * For an IN transfer the iAlt field should be set so
220 * the h/w will automatically advance to the next
221 * transfer. However, if there are 8 or more TDs
222 * remaining in this transfer then iAlt cannot be set
223 * as it could point to somewhere in this transfer.
224 */
225 if (std->ntds_remaining < WHCI_QSET_TD_MAX) {
226 int ialt;
227 ialt = (qset->td_end + std->ntds_remaining) % WHCI_QSET_TD_MAX;
228 status |= QTD_STS_IALT(ialt);
229 } else if (usb_pipein(std->urb->pipe))
230 qset->pause_after_urb = std->urb;
231
232 if (std->num_pointers)
233 qtd->options = cpu_to_le32(QTD_OPT_IOC);
234 else
235 qtd->options = cpu_to_le32(QTD_OPT_IOC | QTD_OPT_SMALL);
236 qtd->page_list_ptr = cpu_to_le64(std->dma_addr);
237
238 qtd->status = cpu_to_le32(status);
239
240 if (QH_STATUS_TO_ICUR(qset->qh.status) == qset->td_end)
241 update = WHC_UPDATE_UPDATED;
242
243 if (++qset->td_end >= WHCI_QSET_TD_MAX)
244 qset->td_end = 0;
245 qset->ntds++;
246 }
247
248 return update;
249}
250
/**
 * qset_remove_qtd - remove the first qTD from a qset.
 *
 * The qTD might be still active (if it's part of a IN URB that
 * resulted in a short read) so ensure it's deactivated.
 */
static void qset_remove_qtd(struct whc *whc, struct whc_qset *qset)
{
	/* Zeroing status clears QTD_STS_ACTIVE, deactivating the qTD. */
	qset->qtd[qset->td_start].status = 0;

	/* td_start..td_end is a circular window into qset->qtd[]. */
	if (++qset->td_start >= WHCI_QSET_TD_MAX)
		qset->td_start = 0;
	qset->ntds--;
}
265
/**
 * qset_free_std - remove an sTD and free it.
 * @whc: the WHCI host controller
 * @std: the sTD to remove and free.
 */
void qset_free_std(struct whc *whc, struct whc_std *std)
{
	list_del(&std->list_node);
	if (std->num_pointers) {
		/* With a page list, std->dma_addr is the mapping of
		 * the page list itself (see qset_fill_page_list());
		 * unmap and free it.  The data buffer mapping is
		 * presumably owned by the USB core — TODO confirm. */
		dma_unmap_single(whc->wusbhc.dev, std->dma_addr,
				 std->num_pointers * sizeof(struct whc_page_list_entry),
				 DMA_TO_DEVICE);
		kfree(std->pl_virt);
	}

	kfree(std);
}
283
/**
 * qset_remove_qtds - remove an URB's qTDs (and sTDs).
 *
 * Assumes this urb's sTDs are at the head of the qset's list (the
 * loop stops at the first sTD belonging to another urb).
 */
static void qset_remove_qtds(struct whc *whc, struct whc_qset *qset,
				    struct urb *urb)
{
	struct whc_std *std, *t;

	list_for_each_entry_safe(std, t, &qset->stds, list_node) {
		if (std->urb != urb)
			break;
		if (std->qtd != NULL)
			qset_remove_qtd(whc, qset);
		qset_free_std(whc, std);
	}
}
300
/**
 * qset_free_stds - free any remaining sTDs for an URB.
 *
 * Unlike qset_remove_qtds() this scans the whole list and does not
 * touch the hardware qTD window.
 */
static void qset_free_stds(struct whc_qset *qset, struct urb *urb)
{
	struct whc_std *std, *t;

	list_for_each_entry_safe(std, t, &qset->stds, list_node) {
		if (std->urb == urb)
			qset_free_std(qset->whc, std);
	}
}
313
/*
 * qset_fill_page_list - allocate and map a page list for a multi-page sTD.
 *
 * Builds the WHCI page list covering [dma_addr, dma_addr + len) and
 * repoints std->dma_addr at the mapped page list (the hardware then
 * reaches the data buffer through it — see qset_add_qtds()).
 *
 * NOTE(review): the dma_map_single() result is not checked with
 * dma_mapping_error() — confirm this cannot fail on supported
 * platforms.
 */
static int qset_fill_page_list(struct whc *whc, struct whc_std *std, gfp_t mem_flags)
{
	dma_addr_t dma_addr = std->dma_addr;
	dma_addr_t sp, ep;
	size_t std_len = std->len;
	size_t pl_len;
	int p;

	/* One entry per page touched after the (possibly unaligned)
	 * first page boundary. */
	sp = ALIGN(dma_addr, WHCI_PAGE_SIZE);
	ep = dma_addr + std_len;
	std->num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE);

	pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);
	std->pl_virt = kmalloc(pl_len, mem_flags);
	if (std->pl_virt == NULL)
		return -ENOMEM;
	std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt, pl_len, DMA_TO_DEVICE);

	/* First entry may point mid-page; later entries are page aligned. */
	for (p = 0; p < std->num_pointers; p++) {
		std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr);
		dma_addr = ALIGN(dma_addr + WHCI_PAGE_SIZE, WHCI_PAGE_SIZE);
	}

	return 0;
}
339
/**
 * urb_dequeue_work - executes asl/pzl update and gives back the urb to the system.
 *
 * Runs in process context because the schedule sync sleeps
 * (asl_update()/pzl_update() use wait_event()).
 */
static void urb_dequeue_work(struct work_struct *work)
{
	struct whc_urb *wurb = container_of(work, struct whc_urb, dequeue_work);
	struct whc_qset *qset = wurb->qset;
	struct whc *whc = qset->whc;
	unsigned long flags;

	if (wurb->is_async == true)
		asl_update(whc, WUSBCMD_ASYNC_UPDATED
			   | WUSBCMD_ASYNC_SYNCED_DB
			   | WUSBCMD_ASYNC_QSET_RM);
	else
		pzl_update(whc, WUSBCMD_PERIODIC_UPDATED
			   | WUSBCMD_PERIODIC_SYNCED_DB
			   | WUSBCMD_PERIODIC_QSET_RM);

	/* Give the urb back with the status recorded at dequeue time. */
	spin_lock_irqsave(&whc->lock, flags);
	qset_remove_urb(whc, qset, wurb->urb, wurb->status);
	spin_unlock_irqrestore(&whc->lock, flags);
}
363
364/**
365 * qset_add_urb - add an urb to the qset's queue.
366 *
367 * The URB is chopped into sTDs, one for each qTD that will required.
368 * At least one qTD (and sTD) is required even if the transfer has no
369 * data (e.g., for some control transfers).
370 */
371int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb,
372 gfp_t mem_flags)
373{
374 struct whc_urb *wurb;
375 int remaining = urb->transfer_buffer_length;
376 u64 transfer_dma = urb->transfer_dma;
377 int ntds_remaining;
378
379 ntds_remaining = DIV_ROUND_UP(remaining, QTD_MAX_XFER_SIZE);
380 if (ntds_remaining == 0)
381 ntds_remaining = 1;
382
383 wurb = kzalloc(sizeof(struct whc_urb), mem_flags);
384 if (wurb == NULL)
385 goto err_no_mem;
386 urb->hcpriv = wurb;
387 wurb->qset = qset;
388 wurb->urb = urb;
389 INIT_WORK(&wurb->dequeue_work, urb_dequeue_work);
390
391 while (ntds_remaining) {
392 struct whc_std *std;
393 size_t std_len;
394
395 std = kmalloc(sizeof(struct whc_std), mem_flags);
396 if (std == NULL)
397 goto err_no_mem;
398
399 std_len = remaining;
400 if (std_len > QTD_MAX_XFER_SIZE)
401 std_len = QTD_MAX_XFER_SIZE;
402
403 std->urb = urb;
404 std->dma_addr = transfer_dma;
405 std->len = std_len;
406 std->ntds_remaining = ntds_remaining;
407 std->qtd = NULL;
408
409 INIT_LIST_HEAD(&std->list_node);
410 list_add_tail(&std->list_node, &qset->stds);
411
412 if (std_len > WHCI_PAGE_SIZE) {
413 if (qset_fill_page_list(whc, std, mem_flags) < 0)
414 goto err_no_mem;
415 } else
416 std->num_pointers = 0;
417
418 ntds_remaining--;
419 remaining -= std_len;
420 transfer_dma += std_len;
421 }
422
423 return 0;
424
425err_no_mem:
426 qset_free_stds(qset, urb);
427 return -ENOMEM;
428}
429
/**
 * qset_remove_urb - remove an URB from the urb queue.
 *
 * The URB is returned to the USB subsystem.
 *
 * Called with whc->lock held; the lock is dropped around the giveback
 * and re-taken before returning.
 */
void qset_remove_urb(struct whc *whc, struct whc_qset *qset,
			    struct urb *urb, int status)
{
	struct wusbhc *wusbhc = &whc->wusbhc;
	struct whc_urb *wurb = urb->hcpriv;

	usb_hcd_unlink_urb_from_ep(&wusbhc->usb_hcd, urb);
	/* Drop the lock as urb->complete() may enqueue another urb. */
	spin_unlock(&whc->lock);
	wusbhc_giveback_urb(wusbhc, urb, status);
	spin_lock(&whc->lock);

	kfree(wurb);
}
449
450/**
451 * get_urb_status_from_qtd - get the completed urb status from qTD status
452 * @urb: completed urb
453 * @status: qTD status
454 */
455static int get_urb_status_from_qtd(struct urb *urb, u32 status)
456{
457 if (status & QTD_STS_HALTED) {
458 if (status & QTD_STS_DBE)
459 return usb_pipein(urb->pipe) ? -ENOSR : -ECOMM;
460 else if (status & QTD_STS_BABBLE)
461 return -EOVERFLOW;
462 else if (status & QTD_STS_RCE)
463 return -ETIME;
464 return -EPIPE;
465 }
466 if (usb_pipein(urb->pipe)
467 && (urb->transfer_flags & URB_SHORT_NOT_OK)
468 && urb->actual_length < urb->transfer_buffer_length)
469 return -EREMOTEIO;
470 return 0;
471}
472
/**
 * process_inactive_qtd - process an inactive (but not halted) qTD.
 *
 * Update the urb with the transfer bytes from the qTD, if the urb is
 * completely transfered or (in the case of an IN only) the LPF is
 * set, then the transfer is complete and the urb should be returned
 * to the system.
 */
void process_inactive_qtd(struct whc *whc, struct whc_qset *qset,
				 struct whc_qtd *qtd)
{
	struct whc_std *std = list_first_entry(&qset->stds, struct whc_std, list_node);
	struct urb *urb = std->urb;
	uint32_t status;
	bool complete;

	status = le32_to_cpu(qtd->status);

	/* The qTD status field holds the REMAINING length, so the
	 * transferred amount is len minus that. */
	urb->actual_length += std->len - QTD_STS_TO_LEN(status);

	if (usb_pipein(urb->pipe) && (status & QTD_STS_LAST_PKT))
		complete = true;
	else
		complete = whc_std_last(std);

	qset_remove_qtd(whc, qset);
	qset_free_std(whc, std);

	/*
	 * Transfers for this URB are complete?  Then return it to the
	 * USB subsystem.
	 */
	if (complete) {
		qset_remove_qtds(whc, qset, urb);
		qset_remove_urb(whc, qset, urb, get_urb_status_from_qtd(urb, status));

		/*
		 * If iAlt isn't valid then the hardware didn't
		 * advance iCur. Adjust the start and end pointers to
		 * match iCur.
		 */
		if (!(status & QTD_STS_IALT_VALID))
			qset->td_start = qset->td_end
				= QH_STATUS_TO_ICUR(le16_to_cpu(qset->qh.status));
		qset->pause_after_urb = NULL;
	}
}
520
/**
 * process_halted_qtd - process a qset with a halted qtd
 *
 * Remove all the qTDs for the failed URB and return the failed URB to
 * the USB subsystem.  Then remove all other qTDs so the qset can be
 * removed.
 *
 * FIXME: this is the point where rate adaptation can be done.  If a
 * transfer failed because it exceeded the maximum number of retries
 * then it could be reactivated with a slower rate without having to
 * remove the qset.
 */
void process_halted_qtd(struct whc *whc, struct whc_qset *qset,
			       struct whc_qtd *qtd)
{
	struct whc_std *std = list_first_entry(&qset->stds, struct whc_std, list_node);
	struct urb *urb = std->urb;
	int urb_status;

	urb_status = get_urb_status_from_qtd(urb, le32_to_cpu(qtd->status));

	qset_remove_qtds(whc, qset, urb);
	qset_remove_urb(whc, qset, urb, urb_status);

	/* Deactivate the remaining qTDs but keep their sTDs (with
	 * qtd detached) so they can be re-queued later. */
	list_for_each_entry(std, &qset->stds, list_node) {
		if (qset->ntds == 0)
			break;
		qset_remove_qtd(whc, qset);
		std->qtd = NULL;
	}

	/* Mark the qset for removal from the schedule. */
	qset->remove = 1;
}
554
/* qset_free - return a qset to the controller's DMA pool. */
void qset_free(struct whc *whc, struct whc_qset *qset)
{
	dma_pool_free(whc->qset_pool, qset, qset->qset_dma);
}
559
/**
 * qset_delete - wait for a qset to be unused, then free it.
 *
 * Sleeps until qset_remove_complete() completes remove_complete, so
 * process context only.
 */
void qset_delete(struct whc *whc, struct whc_qset *qset)
{
	wait_for_completion(&qset->remove_complete);
	qset_free(whc, qset);
}
diff --git a/drivers/usb/host/whci/whcd.h b/drivers/usb/host/whci/whcd.h
new file mode 100644
index 000000000000..1d2a53bd39fd
--- /dev/null
+++ b/drivers/usb/host/whci/whcd.h
@@ -0,0 +1,197 @@
1/*
2 * Wireless Host Controller (WHC) private header.
3 *
4 * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License version
8 * 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20#ifndef __WHCD_H
21#define __WHCD_H
22
23#include <linux/uwb/whci.h>
24#include <linux/workqueue.h>
25
26#include "whci-hc.h"
27
28/* Generic command timeout. */
29#define WHC_GENCMD_TIMEOUT_MS 100
30
31
/*
 * struct whc - state for one WHCI host controller.
 *
 * wusbhc is embedded (not a pointer): wusbhc_to_whc() recovers the
 * whc from it with container_of().
 */
struct whc {
	struct wusbhc wusbhc;
	struct umc_dev *umc;

	resource_size_t base_phys;	/* physical address of the register region */
	void __iomem *base;		/* mapped registers */
	int irq;

	/* Controller limits — presumably read from capability
	 * registers at init; confirm in init.c. */
	u8 n_devices;
	u8 n_keys;
	u8 n_mmc_ies;

	/* Buffers shared with the hardware (coherent DMA). */
	u64 *pz_list;			/* periodic zone list (16 entries) */
	struct dn_buf_entry *dn_buf;	/* device notification buffer */
	struct di_buf_entry *di_buf;	/* device information buffer */
	dma_addr_t pz_list_dma;
	dma_addr_t dn_buf_dma;
	dma_addr_t di_buf_dma;

	spinlock_t lock;		/* protects schedules and qset lists */
	struct mutex mutex;

	void *           gen_cmd_buf;	/* generic command buffer */
	dma_addr_t       gen_cmd_buf_dma;
	wait_queue_head_t cmd_wq;	/* woken on WUSBSTS_GEN_CMD_DONE */

	struct workqueue_struct *workqueue;
	struct work_struct dn_work;	/* device notification processing */

	struct dma_pool *qset_pool;	/* pool the qsets are allocated from */

	/* Asynchronous schedule (ASL). */
	struct list_head async_list;
	struct list_head async_removed_list;
	wait_queue_head_t async_list_wq;
	struct work_struct async_work;

	/* Periodic schedule (PZL); one software list per period. */
	struct list_head periodic_list[5];
	struct list_head periodic_removed_list;
	wait_queue_head_t periodic_list_wq;
	struct work_struct periodic_work;
};
73
74#define wusbhc_to_whc(w) (container_of((w), struct whc, wusbhc))
75
/**
 * struct whc_std - a software TD.
 * @urb: the URB this sTD is for.
 * @len: the length of data in the associated TD.
 * @ntds_remaining: number of TDs (starting from this one) in this transfer.
 * @qtd: the hardware qTD this sTD is queued on, or NULL if none yet.
 * @list_node: entry in the owning qset's list of sTDs.
 * @num_pointers: number of page list entries (0 if the buffer is
 *	pointed to directly).
 * @dma_addr: DMA address of the data buffer, or of the page list when
 *	@num_pointers != 0.
 * @pl_virt: virtual address of the page list, if any.
 *
 * Queued URBs may require more TDs than are available in a qset so we
 * use a list of these "software TDs" (sTDs) to hold per-TD data.
 */
struct whc_std {
	struct urb *urb;
	size_t len;
	int    ntds_remaining;
	struct whc_qtd *qtd;

	struct list_head list_node;
	int    num_pointers;
	dma_addr_t dma_addr;
	struct whc_page_list_entry *pl_virt;
};
97
/**
 * struct whc_urb - per URB host controller structure.
 * @urb: the URB this struct is for.
 * @qset: the qset associated to the URB.
 * @dequeue_work: the work to remove the URB when dequeued.
 * @is_async: true if the URB belongs to the asynchronous scheduler,
 *	false for the periodic scheduler.
 * @status: the status to be returned when calling wusbhc_giveback_urb.
 */
struct whc_urb {
	struct urb *urb;
	struct whc_qset *qset;
	struct work_struct dequeue_work;
	bool is_async;
	int status;
};
113
/**
 * whc_std_last - is this sTD the URB's last?
 * @std: the sTD to check.
 *
 * Used when deciding whether to set the Last Packet Flag on an OUT
 * qTD and whether a completed qTD finishes its URB.
 */
static inline bool whc_std_last(struct whc_std *std)
{
	return std->ntds_remaining <= 1;
}
122
/* Bit flags describing which hardware schedule updates are required. */
enum whc_update {
	WHC_UPDATE_ADDED   = 0x01,	/* qset(s) added to a schedule */
	WHC_UPDATE_REMOVED = 0x02,	/* qset(s) removed from a schedule */
	WHC_UPDATE_UPDATED = 0x04,	/* in-place change needs syncing */
};
128
129/* init.c */
130int whc_init(struct whc *whc);
131void whc_clean_up(struct whc *whc);
132
133/* hw.c */
134void whc_write_wusbcmd(struct whc *whc, u32 mask, u32 val);
135int whc_do_gencmd(struct whc *whc, u32 cmd, u32 params, void *addr, size_t len);
136
137/* wusb.c */
138int whc_wusbhc_start(struct wusbhc *wusbhc);
139void whc_wusbhc_stop(struct wusbhc *wusbhc);
140int whc_mmcie_add(struct wusbhc *wusbhc, u8 interval, u8 repeat_cnt,
141 u8 handle, struct wuie_hdr *wuie);
142int whc_mmcie_rm(struct wusbhc *wusbhc, u8 handle);
143int whc_bwa_set(struct wusbhc *wusbhc, s8 stream_index, const struct uwb_mas_bm *mas_bm);
144int whc_dev_info_set(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev);
145int whc_set_num_dnts(struct wusbhc *wusbhc, u8 interval, u8 slots);
146int whc_set_ptk(struct wusbhc *wusbhc, u8 port_idx, u32 tkid,
147 const void *ptk, size_t key_size);
148int whc_set_gtk(struct wusbhc *wusbhc, u32 tkid,
149 const void *gtk, size_t key_size);
150int whc_set_cluster_id(struct whc *whc, u8 bcid);
151
152/* int.c */
153irqreturn_t whc_int_handler(struct usb_hcd *hcd);
154void whc_dn_work(struct work_struct *work);
155
156/* asl.c */
157void asl_start(struct whc *whc);
158void asl_stop(struct whc *whc);
159int asl_init(struct whc *whc);
160void asl_clean_up(struct whc *whc);
161int asl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags);
162int asl_urb_dequeue(struct whc *whc, struct urb *urb, int status);
163void asl_qset_delete(struct whc *whc, struct whc_qset *qset);
164void scan_async_work(struct work_struct *work);
165
166/* pzl.c */
167int pzl_init(struct whc *whc);
168void pzl_clean_up(struct whc *whc);
169void pzl_start(struct whc *whc);
170void pzl_stop(struct whc *whc);
171int pzl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags);
172int pzl_urb_dequeue(struct whc *whc, struct urb *urb, int status);
173void pzl_qset_delete(struct whc *whc, struct whc_qset *qset);
174void scan_periodic_work(struct work_struct *work);
175
176/* qset.c */
177struct whc_qset *qset_alloc(struct whc *whc, gfp_t mem_flags);
178void qset_free(struct whc *whc, struct whc_qset *qset);
179struct whc_qset *get_qset(struct whc *whc, struct urb *urb, gfp_t mem_flags);
180void qset_delete(struct whc *whc, struct whc_qset *qset);
181void qset_clear(struct whc *whc, struct whc_qset *qset);
182int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb,
183 gfp_t mem_flags);
184void qset_free_std(struct whc *whc, struct whc_std *std);
185void qset_remove_urb(struct whc *whc, struct whc_qset *qset,
186 struct urb *urb, int status);
187void process_halted_qtd(struct whc *whc, struct whc_qset *qset,
188 struct whc_qtd *qtd);
189void process_inactive_qtd(struct whc *whc, struct whc_qset *qset,
190 struct whc_qtd *qtd);
191enum whc_update qset_add_qtds(struct whc *whc, struct whc_qset *qset);
192void qset_remove_complete(struct whc *whc, struct whc_qset *qset);
193void dump_qset(struct whc_qset *qset, struct device *dev);
194void pzl_update(struct whc *whc, uint32_t wusbcmd);
195void asl_update(struct whc *whc, uint32_t wusbcmd);
196
197#endif /* #ifndef __WHCD_H */
diff --git a/drivers/usb/host/whci/whci-hc.h b/drivers/usb/host/whci/whci-hc.h
new file mode 100644
index 000000000000..bff1eb7a35cf
--- /dev/null
+++ b/drivers/usb/host/whci/whci-hc.h
@@ -0,0 +1,416 @@
1/*
2 * Wireless Host Controller (WHC) data structures.
3 *
4 * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License version
8 * 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA.
19 */
20#ifndef _WHCI_WHCI_HC_H
21#define _WHCI_WHCI_HC_H
22
23#include <linux/list.h>
24
25/**
26 * WHCI_PAGE_SIZE - page size used by WHCI
27 *
28 * WHCI assumes that host system uses pages of 4096 octets.
29 */
30#define WHCI_PAGE_SIZE 4096
31
32
33/**
34 * QTD_MAX_XFER_SIZE - max number of bytes to transfer with a single
35 * qtd.
36 *
37 * This is 2^20 - 1.
38 */
39#define QTD_MAX_XFER_SIZE 1048575
40
41
42/**
43 * struct whc_qtd - Queue Element Transfer Descriptors (qTD)
44 *
45 * This describes the data for a bulk, control or interrupt transfer.
46 *
47 * [WHCI] section 3.2.4
48 */
49struct whc_qtd {
50 __le32 status; /*< remaining transfer len and transfer status */
51 __le32 options;
52 __le64 page_list_ptr; /*< physical pointer to data buffer page list*/
53 __u8 setup[8]; /*< setup data for control transfers */
54} __attribute__((packed));
55
56#define QTD_STS_ACTIVE (1 << 31) /* enable execution of transaction */
57#define QTD_STS_HALTED (1 << 30) /* transfer halted */
58#define QTD_STS_DBE (1 << 29) /* data buffer error */
59#define QTD_STS_BABBLE (1 << 28) /* babble detected */
60#define QTD_STS_RCE (1 << 27) /* retry count exceeded */
61#define QTD_STS_LAST_PKT (1 << 26) /* set Last Packet Flag in WUSB header */
62#define QTD_STS_INACTIVE (1 << 25) /* queue set is marked inactive */
63#define QTD_STS_IALT_VALID (1 << 23) /* iAlt field is valid */
64#define QTD_STS_IALT(i) (QTD_STS_IALT_VALID | ((i) << 20)) /* iAlt field */
65#define QTD_STS_LEN(l) ((l) << 0) /* transfer length */
66#define QTD_STS_TO_LEN(s) ((s) & 0x000fffff)
67
68#define QTD_OPT_IOC (1 << 1) /* interrupt on complete */
69#define QTD_OPT_SMALL (1 << 0) /* page_list_ptr points to buffer directly */
70
71/**
72 * struct whc_itd - Isochronous Queue Element Transfer Descriptors (iTD)
73 *
74 * This describes the data and other parameters for an isochronous
75 * transfer.
76 *
77 * [WHCI] section 3.2.5
78 */
79struct whc_itd {
80 __le16 presentation_time; /*< presentation time for OUT transfers */
81 __u8 num_segments; /*< number of data segments in segment list */
82 __u8 status; /*< command execution status */
83 __le32 options; /*< misc transfer options */
84 __le64 page_list_ptr; /*< physical pointer to data buffer page list */
85 __le64 seg_list_ptr; /*< physical pointer to segment list */
86} __attribute__((packed));
87
88#define ITD_STS_ACTIVE (1 << 7) /* enable execution of transaction */
89#define ITD_STS_DBE (1 << 5) /* data buffer error */
90#define ITD_STS_BABBLE (1 << 4) /* babble detected */
91#define ITD_STS_INACTIVE (1 << 1) /* queue set is marked inactive */
92
93#define ITD_OPT_IOC (1 << 1) /* interrupt on complete */
94#define ITD_OPT_SMALL (1 << 0) /* page_list_ptr points to buffer directly */
95
96/**
97 * Page list entry.
98 *
99 * A TD's page list must contain sufficient page list entries for the
100 * total data length in the TD.
101 *
102 * [WHCI] section 3.2.4.3
103 */
104struct whc_page_list_entry {
105 __le64 buf_ptr; /*< physical pointer to buffer */
106} __attribute__((packed));
107
108/**
109 * struct whc_seg_list_entry - Segment list entry.
110 *
111 * Describes a portion of the data buffer described in the containing
112 * qTD's page list.
113 *
114 * seg_ptr = qtd->page_list_ptr[qtd->seg_list_ptr[seg].idx].buf_ptr
115 * + qtd->seg_list_ptr[seg].offset;
116 *
117 * Segments can't cross page boundaries.
118 *
119 * [WHCI] section 3.2.5.5
120 */
121struct whc_seg_list_entry {
122 __le16 len; /*< segment length */
123 __u8 idx; /*< index into page list */
124 __u8 status; /*< segment status */
125 __le16 offset; /*< 12 bit offset into page */
126} __attribute__((packed));
127
128/**
129 * struct whc_qhead - endpoint and status information for a qset.
130 *
131 * [WHCI] section 3.2.6
132 */
133struct whc_qhead {
134 __le64 link; /*< next qset in list */
135 __le32 info1;
136 __le32 info2;
137 __le32 info3;
138 __le16 status;
139 __le16 err_count; /*< transaction error count */
140 __le32 cur_window;
141 __le32 scratch[3]; /*< h/w scratch area */
142 union {
143 struct whc_qtd qtd;
144 struct whc_itd itd;
145 } overlay;
146} __attribute__((packed));
147
148#define QH_LINK_PTR_MASK (~0x03Full)
149#define QH_LINK_PTR(ptr) ((ptr) & QH_LINK_PTR_MASK)
150#define QH_LINK_IQS (1 << 4) /* isochronous queue set */
151#define QH_LINK_NTDS(n) (((n) - 1) << 1) /* number of TDs in queue set */
152#define QH_LINK_T (1 << 0) /* last queue set in periodic schedule list */
153
154#define QH_INFO1_EP(e) ((e) << 0) /* endpoint number */
155#define QH_INFO1_DIR_IN (1 << 4) /* IN transfer */
156#define QH_INFO1_DIR_OUT (0 << 4) /* OUT transfer */
157#define QH_INFO1_TR_TYPE_CTRL (0x0 << 5) /* control transfer */
158#define QH_INFO1_TR_TYPE_ISOC (0x1 << 5) /* isochronous transfer */
159#define QH_INFO1_TR_TYPE_BULK (0x2 << 5) /* bulk transfer */
160#define QH_INFO1_TR_TYPE_INT (0x3 << 5) /* interrupt */
161#define QH_INFO1_TR_TYPE_LP_INT (0x7 << 5) /* low power interrupt */
162#define QH_INFO1_DEV_INFO_IDX(i) ((i) << 8) /* index into device info buffer */
163#define QH_INFO1_SET_INACTIVE (1 << 15) /* set inactive after transfer */
164#define QH_INFO1_MAX_PKT_LEN(l) ((l) << 16) /* maximum packet length */
165
166#define QH_INFO2_BURST(b) ((b) << 0) /* maximum burst length */
167#define QH_INFO2_DBP(p) ((p) << 5) /* data burst policy (see [WUSB] table 5-7) */
168#define QH_INFO2_MAX_COUNT(c) ((c) << 8) /* max isoc/int pkts per zone */
169#define QH_INFO2_RQS (1 << 15) /* reactivate queue set */
170#define QH_INFO2_MAX_RETRY(r) ((r) << 16) /* maximum transaction retries */
171#define QH_INFO2_MAX_SEQ(s) ((s) << 20) /* maximum sequence number */
172#define QH_INFO3_MAX_DELAY(d) ((d) << 0) /* maximum stream delay in 125 us units (isoc only) */
173#define QH_INFO3_INTERVAL(i) ((i) << 16) /* segment interval in 125 us units (isoc only) */
174
175#define QH_INFO3_TX_RATE_53_3 (0 << 24)
176#define QH_INFO3_TX_RATE_80 (1 << 24)
177#define QH_INFO3_TX_RATE_106_7 (2 << 24)
178#define QH_INFO3_TX_RATE_160 (3 << 24)
179#define QH_INFO3_TX_RATE_200 (4 << 24)
180#define QH_INFO3_TX_RATE_320 (5 << 24)
181#define QH_INFO3_TX_RATE_400 (6 << 24)
182#define QH_INFO3_TX_RATE_480 (7 << 24)
183#define QH_INFO3_TX_PWR(p) ((p) << 29) /* transmit power (see [WUSB] section 5.2.1.2) */
184
185#define QH_STATUS_FLOW_CTRL (1 << 15)
186#define QH_STATUS_ICUR(i) ((i) << 5)
187#define QH_STATUS_TO_ICUR(s) (((s) >> 5) & 0x7)
188
189/**
190 * usb_pipe_to_qh_type - USB core pipe type to QH transfer type
191 *
192 * Returns the QH type field for a USB core pipe type.
193 */
194static inline unsigned usb_pipe_to_qh_type(unsigned pipe)
195{
196 static const unsigned type[] = {
197 [PIPE_ISOCHRONOUS] = QH_INFO1_TR_TYPE_ISOC,
198 [PIPE_INTERRUPT] = QH_INFO1_TR_TYPE_INT,
199 [PIPE_CONTROL] = QH_INFO1_TR_TYPE_CTRL,
200 [PIPE_BULK] = QH_INFO1_TR_TYPE_BULK,
201 };
202 return type[usb_pipetype(pipe)];
203}
204
205/**
206 * Maximum number of TDs in a qset.
207 */
208#define WHCI_QSET_TD_MAX 8
209
210/**
211 * struct whc_qset - WUSB data transfers to a specific endpoint
212 * @qh: the QHead of this qset
213 * @qtd: up to 8 qTDs (for qsets for control, bulk and interrupt
214 * transfers)
215 * @itd: up to 8 iTDs (for qsets for isochronous transfers)
216 * @qset_dma: DMA address for this qset
217 * @whc: WHCI HC this qset is for
218 * @ep: endpoint
219 * @stds: list of sTDs queued to this qset
220 * @ntds: number of qTDs queued (not necessarily the same as nTDs
221 * field in the QH)
222 * @td_start: index of the first qTD in the list
223 * @td_end: index of next free qTD in the list (provided
224 * ntds < WHCI_QSET_TD_MAX)
225 *
226 * Queue Sets (qsets) are added to the asynchronous schedule list
227 * (ASL) or the periodic zone list (PZL).
228 *
229 * qsets may contain up to 8 TDs (either qTDs or iTDs as appropriate).
230 * Each TD may refer to at most 1 MiB of data. If a single transfer
231 * has > 8MiB of data, TDs can be reused as they are completed since
232 * the TD list is used as a circular buffer. Similarly, several
233 * (smaller) transfers may be queued in a qset.
234 *
235 * WHCI controllers may cache portions of the qsets in the ASL and
236 * PZL, requiring the WHCD to inform the WHC that the lists have been
237 * updated (fields changed or qsets inserted or removed). For safe
238 * insertion and removal of qsets from the lists the schedule must be
239 * stopped to avoid races in updating the QH link pointers.
240 *
241 * Since the HC is free to execute qsets in any order, all transfers
242 * to an endpoint should use the same qset to ensure transfers are
243 * executed in the order they're submitted.
244 *
245 * [WHCI] section 3.2.3
246 */
247struct whc_qset {
248 struct whc_qhead qh;
249 union {
250 struct whc_qtd qtd[WHCI_QSET_TD_MAX];
251 struct whc_itd itd[WHCI_QSET_TD_MAX];
252 };
253
254 /* private data for WHCD */
255 dma_addr_t qset_dma;
256 struct whc *whc;
257 struct usb_host_endpoint *ep;
258 struct list_head stds;
259 int ntds;
260 int td_start;
261 int td_end;
262 struct list_head list_node;
263 unsigned in_sw_list:1;
264 unsigned in_hw_list:1;
265 unsigned remove:1;
266 struct urb *pause_after_urb;
267 struct completion remove_complete;
268 int max_burst;
269 int max_seq;
270};
271
272static inline void whc_qset_set_link_ptr(u64 *ptr, u64 target)
273{
274 if (target)
275 *ptr = (*ptr & ~(QH_LINK_PTR_MASK | QH_LINK_T)) | QH_LINK_PTR(target);
276 else
277 *ptr = QH_LINK_T;
278}
279
280/**
281 * struct di_buf_entry - Device Information (DI) buffer entry.
282 *
283 * There's one of these per connected device.
284 */
285struct di_buf_entry {
286 __le32 availability_info[8]; /*< MAS availability information, one MAS per bit */
287 __le32 addr_sec_info; /*< addressing and security info */
288 __le32 reserved[7];
289} __attribute__((packed));
290
291#define WHC_DI_SECURE (1 << 31)
292#define WHC_DI_DISABLE (1 << 30)
293#define WHC_DI_KEY_IDX(k) ((k) << 8)
294#define WHC_DI_KEY_IDX_MASK 0x0000ff00
295#define WHC_DI_DEV_ADDR(a) ((a) << 0)
296#define WHC_DI_DEV_ADDR_MASK 0x000000ff
297
298/**
299 * struct dn_buf_entry - Device Notification (DN) buffer entry.
300 *
301 * [WHCI] section 3.2.8
302 */
303struct dn_buf_entry {
304 __u8 msg_size; /*< number of octets of valid DN data */
305 __u8 reserved1;
306 __u8 src_addr; /*< source address */
307 __u8 status; /*< buffer entry status */
308 __le32 tkid; /*< TKID for source device, valid if secure bit is set */
309 __u8 dn_data[56]; /*< up to 56 octets of DN data */
310} __attribute__((packed));
311
312#define WHC_DN_STATUS_VALID (1 << 7) /* buffer entry is valid */
313#define WHC_DN_STATUS_SECURE (1 << 6) /* notification received using secure frame */
314
315#define WHC_N_DN_ENTRIES (4096 / sizeof(struct dn_buf_entry))
316
317/* The Add MMC IE WUSB Generic Command may take up to 256 bytes of
318 data. [WHCI] section 2.4.7. */
319#define WHC_GEN_CMD_DATA_LEN 256
320
321/*
322 * HC registers.
323 *
324 * [WHCI] section 2.4
325 */
326
327#define WHCIVERSION 0x00
328
329#define WHCSPARAMS 0x04
330# define WHCSPARAMS_TO_N_MMC_IES(p) (((p) >> 16) & 0xff)
331# define WHCSPARAMS_TO_N_KEYS(p) (((p) >> 8) & 0xff)
332# define WHCSPARAMS_TO_N_DEVICES(p) (((p) >> 0) & 0x7f)
333
334#define WUSBCMD 0x08
335# define WUSBCMD_BCID(b) ((b) << 16)
336# define WUSBCMD_BCID_MASK (0xff << 16)
337# define WUSBCMD_ASYNC_QSET_RM (1 << 12)
338# define WUSBCMD_PERIODIC_QSET_RM (1 << 11)
339# define WUSBCMD_WUSBSI(s) ((s) << 8)
340# define WUSBCMD_WUSBSI_MASK (0x7 << 8)
341# define WUSBCMD_ASYNC_SYNCED_DB (1 << 7)
342# define WUSBCMD_PERIODIC_SYNCED_DB (1 << 6)
343# define WUSBCMD_ASYNC_UPDATED (1 << 5)
344# define WUSBCMD_PERIODIC_UPDATED (1 << 4)
345# define WUSBCMD_ASYNC_EN (1 << 3)
346# define WUSBCMD_PERIODIC_EN (1 << 2)
347# define WUSBCMD_WHCRESET (1 << 1)
348# define WUSBCMD_RUN (1 << 0)
349
350#define WUSBSTS 0x0c
351# define WUSBSTS_ASYNC_SCHED (1 << 15)
352# define WUSBSTS_PERIODIC_SCHED (1 << 14)
353# define WUSBSTS_DNTS_SCHED (1 << 13)
354# define WUSBSTS_HCHALTED (1 << 12)
355# define WUSBSTS_GEN_CMD_DONE (1 << 9)
356# define WUSBSTS_CHAN_TIME_ROLLOVER (1 << 8)
357# define WUSBSTS_DNTS_OVERFLOW (1 << 7)
358# define WUSBSTS_BPST_ADJUSTMENT_CHANGED (1 << 6)
359# define WUSBSTS_HOST_ERR (1 << 5)
360# define WUSBSTS_ASYNC_SCHED_SYNCED (1 << 4)
361# define WUSBSTS_PERIODIC_SCHED_SYNCED (1 << 3)
362# define WUSBSTS_DNTS_INT (1 << 2)
363# define WUSBSTS_ERR_INT (1 << 1)
364# define WUSBSTS_INT (1 << 0)
365# define WUSBSTS_INT_MASK 0x3ff
366
367#define WUSBINTR 0x10
368# define WUSBINTR_GEN_CMD_DONE (1 << 9)
369# define WUSBINTR_CHAN_TIME_ROLLOVER (1 << 8)
370# define WUSBINTR_DNTS_OVERFLOW (1 << 7)
371# define WUSBINTR_BPST_ADJUSTMENT_CHANGED (1 << 6)
372# define WUSBINTR_HOST_ERR (1 << 5)
373# define WUSBINTR_ASYNC_SCHED_SYNCED (1 << 4)
374# define WUSBINTR_PERIODIC_SCHED_SYNCED (1 << 3)
375# define WUSBINTR_DNTS_INT (1 << 2)
376# define WUSBINTR_ERR_INT (1 << 1)
377# define WUSBINTR_INT (1 << 0)
378# define WUSBINTR_ALL 0x3ff
379
380#define WUSBGENCMDSTS 0x14
381# define WUSBGENCMDSTS_ACTIVE (1 << 31)
382# define WUSBGENCMDSTS_ERROR (1 << 24)
383# define WUSBGENCMDSTS_IOC (1 << 23)
384# define WUSBGENCMDSTS_MMCIE_ADD 0x01
385# define WUSBGENCMDSTS_MMCIE_RM 0x02
386# define WUSBGENCMDSTS_SET_MAS 0x03
387# define WUSBGENCMDSTS_CHAN_STOP 0x04
388# define WUSBGENCMDSTS_RWP_EN 0x05
389
390#define WUSBGENCMDPARAMS 0x18
391#define WUSBGENADDR 0x20
392#define WUSBASYNCLISTADDR 0x28
393#define WUSBDNTSBUFADDR 0x30
394#define WUSBDEVICEINFOADDR 0x38
395
396#define WUSBSETSECKEYCMD 0x40
397# define WUSBSETSECKEYCMD_SET (1 << 31)
398# define WUSBSETSECKEYCMD_ERASE (1 << 30)
399# define WUSBSETSECKEYCMD_GTK (1 << 8)
400# define WUSBSETSECKEYCMD_IDX(i) ((i) << 0)
401
402#define WUSBTKID 0x44
403#define WUSBSECKEY 0x48
404#define WUSBPERIODICLISTBASE 0x58
405#define WUSBMASINDEX 0x60
406
407#define WUSBDNTSCTRL 0x64
408# define WUSBDNTSCTRL_ACTIVE (1 << 31)
409# define WUSBDNTSCTRL_INTERVAL(i) ((i) << 8)
410# define WUSBDNTSCTRL_SLOTS(s) ((s) << 0)
411
412#define WUSBTIME 0x68
413#define WUSBBPST 0x6c
414#define WUSBDIBUPDATED 0x70
415
416#endif /* #ifndef _WHCI_WHCI_HC_H */
diff --git a/drivers/usb/host/whci/wusb.c b/drivers/usb/host/whci/wusb.c
new file mode 100644
index 000000000000..66e4ddcd961d
--- /dev/null
+++ b/drivers/usb/host/whci/wusb.c
@@ -0,0 +1,241 @@
1/*
2 * Wireless Host Controller (WHC) WUSB operations.
3 *
4 * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License version
8 * 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18#include <linux/version.h>
19#include <linux/kernel.h>
20#include <linux/init.h>
21#include <linux/uwb/umc.h>
22#define D_LOCAL 1
23#include <linux/uwb/debug.h>
24
25#include "../../wusbcore/wusbhc.h"
26
27#include "whcd.h"
28
29#if D_LOCAL >= 1
30static void dump_di(struct whc *whc, int idx)
31{
32 struct di_buf_entry *di = &whc->di_buf[idx];
33 struct device *dev = &whc->umc->dev;
34 char buf[128];
35
36 bitmap_scnprintf(buf, sizeof(buf), (unsigned long *)di->availability_info, UWB_NUM_MAS);
37
38 d_printf(1, dev, "DI[%d]\n", idx);
39 d_printf(1, dev, " availability: %s\n", buf);
40 d_printf(1, dev, " %c%c key idx: %d dev addr: %d\n",
41 (di->addr_sec_info & WHC_DI_SECURE) ? 'S' : ' ',
42 (di->addr_sec_info & WHC_DI_DISABLE) ? 'D' : ' ',
43 (di->addr_sec_info & WHC_DI_KEY_IDX_MASK) >> 8,
44 (di->addr_sec_info & WHC_DI_DEV_ADDR_MASK));
45}
46#else
47static inline void dump_di(struct whc *whc, int idx)
48{
49}
50#endif
51
52static int whc_update_di(struct whc *whc, int idx)
53{
54 int offset = idx / 32;
55 u32 bit = 1 << (idx % 32);
56
57 dump_di(whc, idx);
58
59 le_writel(bit, whc->base + WUSBDIBUPDATED + offset);
60
61 return whci_wait_for(&whc->umc->dev,
62 whc->base + WUSBDIBUPDATED + offset, bit, 0,
63 100, "DI update");
64}
65
66/*
67 * WHCI starts and stops MMCs based on there being a valid GTK so
68 * these need only start/stop the asynchronous and periodic schedules.
69 */
70
71int whc_wusbhc_start(struct wusbhc *wusbhc)
72{
73 struct whc *whc = wusbhc_to_whc(wusbhc);
74
75 asl_start(whc);
76 pzl_start(whc);
77
78 return 0;
79}
80
81void whc_wusbhc_stop(struct wusbhc *wusbhc)
82{
83 struct whc *whc = wusbhc_to_whc(wusbhc);
84
85 pzl_stop(whc);
86 asl_stop(whc);
87}
88
89int whc_mmcie_add(struct wusbhc *wusbhc, u8 interval, u8 repeat_cnt,
90 u8 handle, struct wuie_hdr *wuie)
91{
92 struct whc *whc = wusbhc_to_whc(wusbhc);
93 u32 params;
94
95 params = (interval << 24)
96 | (repeat_cnt << 16)
97 | (wuie->bLength << 8)
98 | handle;
99
100 return whc_do_gencmd(whc, WUSBGENCMDSTS_MMCIE_ADD, params, wuie, wuie->bLength);
101}
102
103int whc_mmcie_rm(struct wusbhc *wusbhc, u8 handle)
104{
105 struct whc *whc = wusbhc_to_whc(wusbhc);
106 u32 params;
107
108 params = handle;
109
110 return whc_do_gencmd(whc, WUSBGENCMDSTS_MMCIE_RM, params, NULL, 0);
111}
112
113int whc_bwa_set(struct wusbhc *wusbhc, s8 stream_index, const struct uwb_mas_bm *mas_bm)
114{
115 struct whc *whc = wusbhc_to_whc(wusbhc);
116
117 if (stream_index >= 0)
118 whc_write_wusbcmd(whc, WUSBCMD_WUSBSI_MASK, WUSBCMD_WUSBSI(stream_index));
119
120 return whc_do_gencmd(whc, WUSBGENCMDSTS_SET_MAS, 0, (void *)mas_bm, sizeof(*mas_bm));
121}
122
123int whc_dev_info_set(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
124{
125 struct whc *whc = wusbhc_to_whc(wusbhc);
126 int idx = wusb_dev->port_idx;
127 struct di_buf_entry *di = &whc->di_buf[idx];
128 int ret;
129
130 mutex_lock(&whc->mutex);
131
132 uwb_mas_bm_copy_le(di->availability_info, &wusb_dev->availability);
133 di->addr_sec_info &= ~(WHC_DI_DISABLE | WHC_DI_DEV_ADDR_MASK);
134 di->addr_sec_info |= WHC_DI_DEV_ADDR(wusb_dev->addr);
135
136 ret = whc_update_di(whc, idx);
137
138 mutex_unlock(&whc->mutex);
139
140 return ret;
141}
142
143/*
144 * Set the number of Device Notification Time Slots (DNTS) and enable
145 * device notifications.
146 */
147int whc_set_num_dnts(struct wusbhc *wusbhc, u8 interval, u8 slots)
148{
149 struct whc *whc = wusbhc_to_whc(wusbhc);
150 u32 dntsctrl;
151
152 dntsctrl = WUSBDNTSCTRL_ACTIVE
153 | WUSBDNTSCTRL_INTERVAL(interval)
154 | WUSBDNTSCTRL_SLOTS(slots);
155
156 le_writel(dntsctrl, whc->base + WUSBDNTSCTRL);
157
158 return 0;
159}
160
161static int whc_set_key(struct whc *whc, u8 key_index, uint32_t tkid,
162 const void *key, size_t key_size, bool is_gtk)
163{
164 uint32_t setkeycmd;
165 uint32_t seckey[4];
166 int i;
167 int ret;
168
169 memcpy(seckey, key, key_size);
170 setkeycmd = WUSBSETSECKEYCMD_SET | WUSBSETSECKEYCMD_IDX(key_index);
171 if (is_gtk)
172 setkeycmd |= WUSBSETSECKEYCMD_GTK;
173
174 le_writel(tkid, whc->base + WUSBTKID);
175 for (i = 0; i < 4; i++)
176 le_writel(seckey[i], whc->base + WUSBSECKEY + 4*i);
177 le_writel(setkeycmd, whc->base + WUSBSETSECKEYCMD);
178
179 ret = whci_wait_for(&whc->umc->dev, whc->base + WUSBSETSECKEYCMD,
180 WUSBSETSECKEYCMD_SET, 0, 100, "set key");
181
182 return ret;
183}
184
185/**
186 * whc_set_ptk - set the PTK to use for a device.
187 *
188 * The index into the key table for this PTK is the same as the
189 * device's port index.
190 */
191int whc_set_ptk(struct wusbhc *wusbhc, u8 port_idx, u32 tkid,
192 const void *ptk, size_t key_size)
193{
194 struct whc *whc = wusbhc_to_whc(wusbhc);
195 struct di_buf_entry *di = &whc->di_buf[port_idx];
196 int ret;
197
198 mutex_lock(&whc->mutex);
199
200 if (ptk) {
201 ret = whc_set_key(whc, port_idx, tkid, ptk, key_size, false);
202 if (ret)
203 goto out;
204
205 di->addr_sec_info &= ~WHC_DI_KEY_IDX_MASK;
206 di->addr_sec_info |= WHC_DI_SECURE | WHC_DI_KEY_IDX(port_idx);
207 } else
208 di->addr_sec_info &= ~WHC_DI_SECURE;
209
210 ret = whc_update_di(whc, port_idx);
211out:
212 mutex_unlock(&whc->mutex);
213 return ret;
214}
215
216/**
217 * whc_set_gtk - set the GTK for subsequent broadcast packets
218 *
219 * The GTK is stored in the last entry in the key table (the previous
220 * N_DEVICES entries are for the per-device PTKs).
221 */
222int whc_set_gtk(struct wusbhc *wusbhc, u32 tkid,
223 const void *gtk, size_t key_size)
224{
225 struct whc *whc = wusbhc_to_whc(wusbhc);
226 int ret;
227
228 mutex_lock(&whc->mutex);
229
230 ret = whc_set_key(whc, whc->n_devices, tkid, gtk, key_size, true);
231
232 mutex_unlock(&whc->mutex);
233
234 return ret;
235}
236
237int whc_set_cluster_id(struct whc *whc, u8 bcid)
238{
239 whc_write_wusbcmd(whc, WUSBCMD_BCID_MASK, WUSBCMD_BCID(bcid));
240 return 0;
241}
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index b358c4e1cf21..444c69c447be 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -1561,8 +1561,7 @@ usbtest_ioctl (struct usb_interface *intf, unsigned int code, void *buf)
1561 if (code != USBTEST_REQUEST) 1561 if (code != USBTEST_REQUEST)
1562 return -EOPNOTSUPP; 1562 return -EOPNOTSUPP;
1563 1563
1564 if (param->iterations <= 0 || param->length < 0 1564 if (param->iterations <= 0)
1565 || param->sglen < 0 || param->vary < 0)
1566 return -EINVAL; 1565 return -EINVAL;
1567 1566
1568 if (mutex_lock_interruptible(&dev->lock)) 1567 if (mutex_lock_interruptible(&dev->lock))
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 3d87eabcd922..bd07eaa300b9 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -95,11 +95,20 @@ static int option_send_setup(struct tty_struct *tty, struct usb_serial_port *po
95#define HUAWEI_PRODUCT_E220 0x1003 95#define HUAWEI_PRODUCT_E220 0x1003
96#define HUAWEI_PRODUCT_E220BIS 0x1004 96#define HUAWEI_PRODUCT_E220BIS 0x1004
97#define HUAWEI_PRODUCT_E1401 0x1401 97#define HUAWEI_PRODUCT_E1401 0x1401
98#define HUAWEI_PRODUCT_E1402 0x1402
98#define HUAWEI_PRODUCT_E1403 0x1403 99#define HUAWEI_PRODUCT_E1403 0x1403
100#define HUAWEI_PRODUCT_E1404 0x1404
99#define HUAWEI_PRODUCT_E1405 0x1405 101#define HUAWEI_PRODUCT_E1405 0x1405
100#define HUAWEI_PRODUCT_E1406 0x1406 102#define HUAWEI_PRODUCT_E1406 0x1406
103#define HUAWEI_PRODUCT_E1407 0x1407
101#define HUAWEI_PRODUCT_E1408 0x1408 104#define HUAWEI_PRODUCT_E1408 0x1408
102#define HUAWEI_PRODUCT_E1409 0x1409 105#define HUAWEI_PRODUCT_E1409 0x1409
106#define HUAWEI_PRODUCT_E140A 0x140A
107#define HUAWEI_PRODUCT_E140B 0x140B
108#define HUAWEI_PRODUCT_E140C 0x140C
109#define HUAWEI_PRODUCT_E140D 0x140D
110#define HUAWEI_PRODUCT_E140E 0x140E
111#define HUAWEI_PRODUCT_E140F 0x140F
103#define HUAWEI_PRODUCT_E1410 0x1410 112#define HUAWEI_PRODUCT_E1410 0x1410
104#define HUAWEI_PRODUCT_E1411 0x1411 113#define HUAWEI_PRODUCT_E1411 0x1411
105#define HUAWEI_PRODUCT_E1412 0x1412 114#define HUAWEI_PRODUCT_E1412 0x1412
@@ -110,6 +119,44 @@ static int option_send_setup(struct tty_struct *tty, struct usb_serial_port *po
110#define HUAWEI_PRODUCT_E1417 0x1417 119#define HUAWEI_PRODUCT_E1417 0x1417
111#define HUAWEI_PRODUCT_E1418 0x1418 120#define HUAWEI_PRODUCT_E1418 0x1418
112#define HUAWEI_PRODUCT_E1419 0x1419 121#define HUAWEI_PRODUCT_E1419 0x1419
122#define HUAWEI_PRODUCT_E141A 0x141A
123#define HUAWEI_PRODUCT_E141B 0x141B
124#define HUAWEI_PRODUCT_E141C 0x141C
125#define HUAWEI_PRODUCT_E141D 0x141D
126#define HUAWEI_PRODUCT_E141E 0x141E
127#define HUAWEI_PRODUCT_E141F 0x141F
128#define HUAWEI_PRODUCT_E1420 0x1420
129#define HUAWEI_PRODUCT_E1421 0x1421
130#define HUAWEI_PRODUCT_E1422 0x1422
131#define HUAWEI_PRODUCT_E1423 0x1423
132#define HUAWEI_PRODUCT_E1424 0x1424
133#define HUAWEI_PRODUCT_E1425 0x1425
134#define HUAWEI_PRODUCT_E1426 0x1426
135#define HUAWEI_PRODUCT_E1427 0x1427
136#define HUAWEI_PRODUCT_E1428 0x1428
137#define HUAWEI_PRODUCT_E1429 0x1429
138#define HUAWEI_PRODUCT_E142A 0x142A
139#define HUAWEI_PRODUCT_E142B 0x142B
140#define HUAWEI_PRODUCT_E142C 0x142C
141#define HUAWEI_PRODUCT_E142D 0x142D
142#define HUAWEI_PRODUCT_E142E 0x142E
143#define HUAWEI_PRODUCT_E142F 0x142F
144#define HUAWEI_PRODUCT_E1430 0x1430
145#define HUAWEI_PRODUCT_E1431 0x1431
146#define HUAWEI_PRODUCT_E1432 0x1432
147#define HUAWEI_PRODUCT_E1433 0x1433
148#define HUAWEI_PRODUCT_E1434 0x1434
149#define HUAWEI_PRODUCT_E1435 0x1435
150#define HUAWEI_PRODUCT_E1436 0x1436
151#define HUAWEI_PRODUCT_E1437 0x1437
152#define HUAWEI_PRODUCT_E1438 0x1438
153#define HUAWEI_PRODUCT_E1439 0x1439
154#define HUAWEI_PRODUCT_E143A 0x143A
155#define HUAWEI_PRODUCT_E143B 0x143B
156#define HUAWEI_PRODUCT_E143C 0x143C
157#define HUAWEI_PRODUCT_E143D 0x143D
158#define HUAWEI_PRODUCT_E143E 0x143E
159#define HUAWEI_PRODUCT_E143F 0x143F
113 160
114#define NOVATELWIRELESS_VENDOR_ID 0x1410 161#define NOVATELWIRELESS_VENDOR_ID 0x1410
115 162
@@ -207,6 +254,7 @@ static int option_send_setup(struct tty_struct *tty, struct usb_serial_port *po
207/* ZTE PRODUCTS */ 254/* ZTE PRODUCTS */
208#define ZTE_VENDOR_ID 0x19d2 255#define ZTE_VENDOR_ID 0x19d2
209#define ZTE_PRODUCT_MF628 0x0015 256#define ZTE_PRODUCT_MF628 0x0015
257#define ZTE_PRODUCT_MF626 0x0031
210#define ZTE_PRODUCT_CDMA_TECH 0xfffe 258#define ZTE_PRODUCT_CDMA_TECH 0xfffe
211 259
212/* Ericsson products */ 260/* Ericsson products */
@@ -248,11 +296,20 @@ static struct usb_device_id option_ids[] = {
248 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E220, 0xff, 0xff, 0xff) }, 296 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E220, 0xff, 0xff, 0xff) },
249 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E220BIS, 0xff, 0xff, 0xff) }, 297 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E220BIS, 0xff, 0xff, 0xff) },
250 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1401, 0xff, 0xff, 0xff) }, 298 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1401, 0xff, 0xff, 0xff) },
299 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1402, 0xff, 0xff, 0xff) },
251 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1403, 0xff, 0xff, 0xff) }, 300 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1403, 0xff, 0xff, 0xff) },
301 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1404, 0xff, 0xff, 0xff) },
252 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1405, 0xff, 0xff, 0xff) }, 302 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1405, 0xff, 0xff, 0xff) },
253 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1406, 0xff, 0xff, 0xff) }, 303 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1406, 0xff, 0xff, 0xff) },
304 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1407, 0xff, 0xff, 0xff) },
254 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1408, 0xff, 0xff, 0xff) }, 305 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1408, 0xff, 0xff, 0xff) },
255 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1409, 0xff, 0xff, 0xff) }, 306 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1409, 0xff, 0xff, 0xff) },
307 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140A, 0xff, 0xff, 0xff) },
308 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140B, 0xff, 0xff, 0xff) },
309 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140C, 0xff, 0xff, 0xff) },
310 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140D, 0xff, 0xff, 0xff) },
311 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140E, 0xff, 0xff, 0xff) },
312 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140F, 0xff, 0xff, 0xff) },
256 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1410, 0xff, 0xff, 0xff) }, 313 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1410, 0xff, 0xff, 0xff) },
257 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1411, 0xff, 0xff, 0xff) }, 314 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1411, 0xff, 0xff, 0xff) },
258 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1412, 0xff, 0xff, 0xff) }, 315 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1412, 0xff, 0xff, 0xff) },
@@ -263,6 +320,44 @@ static struct usb_device_id option_ids[] = {
263 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1417, 0xff, 0xff, 0xff) }, 320 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1417, 0xff, 0xff, 0xff) },
264 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1418, 0xff, 0xff, 0xff) }, 321 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1418, 0xff, 0xff, 0xff) },
265 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1419, 0xff, 0xff, 0xff) }, 322 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1419, 0xff, 0xff, 0xff) },
323 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141A, 0xff, 0xff, 0xff) },
324 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141B, 0xff, 0xff, 0xff) },
325 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141C, 0xff, 0xff, 0xff) },
326 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141D, 0xff, 0xff, 0xff) },
327 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141E, 0xff, 0xff, 0xff) },
328 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141F, 0xff, 0xff, 0xff) },
329 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1420, 0xff, 0xff, 0xff) },
330 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1421, 0xff, 0xff, 0xff) },
331 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1422, 0xff, 0xff, 0xff) },
332 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1423, 0xff, 0xff, 0xff) },
333 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1424, 0xff, 0xff, 0xff) },
334 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1425, 0xff, 0xff, 0xff) },
335 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1426, 0xff, 0xff, 0xff) },
336 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1427, 0xff, 0xff, 0xff) },
337 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1428, 0xff, 0xff, 0xff) },
338 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1429, 0xff, 0xff, 0xff) },
339 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142A, 0xff, 0xff, 0xff) },
340 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142B, 0xff, 0xff, 0xff) },
341 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142C, 0xff, 0xff, 0xff) },
342 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142D, 0xff, 0xff, 0xff) },
343 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142E, 0xff, 0xff, 0xff) },
344 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142F, 0xff, 0xff, 0xff) },
345 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1430, 0xff, 0xff, 0xff) },
346 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1431, 0xff, 0xff, 0xff) },
347 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1432, 0xff, 0xff, 0xff) },
348 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1433, 0xff, 0xff, 0xff) },
349 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1434, 0xff, 0xff, 0xff) },
350 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1435, 0xff, 0xff, 0xff) },
351 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1436, 0xff, 0xff, 0xff) },
352 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1437, 0xff, 0xff, 0xff) },
353 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1438, 0xff, 0xff, 0xff) },
354 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1439, 0xff, 0xff, 0xff) },
355 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143A, 0xff, 0xff, 0xff) },
356 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143B, 0xff, 0xff, 0xff) },
357 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143C, 0xff, 0xff, 0xff) },
358 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143D, 0xff, 0xff, 0xff) },
359 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143E, 0xff, 0xff, 0xff) },
360 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143F, 0xff, 0xff, 0xff) },
266 { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_9508) }, 361 { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_9508) },
267 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) }, /* Novatel Merlin V640/XV620 */ 362 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) }, /* Novatel Merlin V640/XV620 */
268 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) }, /* Novatel Merlin V620/S620 */ 363 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) }, /* Novatel Merlin V620/S620 */
@@ -336,6 +431,7 @@ static struct usb_device_id option_ids[] = {
336 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ 431 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
337 { USB_DEVICE(MAXON_VENDOR_ID, 0x6280) }, /* BP3-USB & BP3-EXT HSDPA */ 432 { USB_DEVICE(MAXON_VENDOR_ID, 0x6280) }, /* BP3-USB & BP3-EXT HSDPA */
338 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) }, 433 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) },
434 { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626) },
339 { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628) }, 435 { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628) },
340 { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH) }, 436 { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH) },
341 { USB_DEVICE(ERICSSON_VENDOR_ID, ERICSSON_PRODUCT_F3507G) }, 437 { USB_DEVICE(ERICSSON_VENDOR_ID, ERICSSON_PRODUCT_F3507G) },
diff --git a/drivers/usb/storage/initializers.c b/drivers/usb/storage/initializers.c
index 4995bb595aef..2dd9bd4bff56 100644
--- a/drivers/usb/storage/initializers.c
+++ b/drivers/usb/storage/initializers.c
@@ -95,11 +95,10 @@ int usb_stor_huawei_e220_init(struct us_data *us)
95{ 95{
96 int result; 96 int result;
97 97
98 us->iobuf[0] = 0x1;
99 result = usb_stor_control_msg(us, us->send_ctrl_pipe, 98 result = usb_stor_control_msg(us, us->send_ctrl_pipe,
100 USB_REQ_SET_FEATURE, 99 USB_REQ_SET_FEATURE,
101 USB_TYPE_STANDARD | USB_RECIP_DEVICE, 100 USB_TYPE_STANDARD | USB_RECIP_DEVICE,
102 0x01, 0x0, us->iobuf, 0x1, 1000); 101 0x01, 0x0, NULL, 0x0, 1000);
103 US_DEBUGP("usb_control_msg performing result is %d\n", result); 102 US_DEBUGP("usb_control_msg performing result is %d\n", result);
104 return (result ? 0 : -1); 103 return (result ? 0 : -1);
105} 104}
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index cd155475cb6e..a2b9ebbef38e 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1628,97 +1628,332 @@ UNUSUAL_DEV( 0x1210, 0x0003, 0x0100, 0x0100,
1628/* Reported by fangxiaozhi <huananhu@huawei.com> 1628/* Reported by fangxiaozhi <huananhu@huawei.com>
1629 * This brings the HUAWEI data card devices into multi-port mode 1629 * This brings the HUAWEI data card devices into multi-port mode
1630 */ 1630 */
1631UNUSUAL_DEV( 0x12d1, 0x1001, 0x0000, 0x0000, 1631UNUSUAL_DEV( 0x12d1, 0x1001, 0x0000, 0x0000,
1632 "HUAWEI MOBILE", 1632 "HUAWEI MOBILE",
1633 "Mass Storage", 1633 "Mass Storage",
1634 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1634 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1635 0), 1635 0),
1636UNUSUAL_DEV( 0x12d1, 0x1003, 0x0000, 0x0000, 1636UNUSUAL_DEV( 0x12d1, 0x1003, 0x0000, 0x0000,
1637 "HUAWEI MOBILE", 1637 "HUAWEI MOBILE",
1638 "Mass Storage", 1638 "Mass Storage",
1639 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1639 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1640 0), 1640 0),
1641UNUSUAL_DEV( 0x12d1, 0x1004, 0x0000, 0x0000, 1641UNUSUAL_DEV( 0x12d1, 0x1004, 0x0000, 0x0000,
1642 "HUAWEI MOBILE", 1642 "HUAWEI MOBILE",
1643 "Mass Storage", 1643 "Mass Storage",
1644 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1644 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1645 0), 1645 0),
1646UNUSUAL_DEV( 0x12d1, 0x1401, 0x0000, 0x0000, 1646UNUSUAL_DEV( 0x12d1, 0x1401, 0x0000, 0x0000,
1647 "HUAWEI MOBILE", 1647 "HUAWEI MOBILE",
1648 "Mass Storage", 1648 "Mass Storage",
1649 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1649 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1650 0), 1650 0),
1651UNUSUAL_DEV( 0x12d1, 0x1403, 0x0000, 0x0000, 1651UNUSUAL_DEV( 0x12d1, 0x1402, 0x0000, 0x0000,
1652 "HUAWEI MOBILE", 1652 "HUAWEI MOBILE",
1653 "Mass Storage", 1653 "Mass Storage",
1654 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1654 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1655 0), 1655 0),
1656UNUSUAL_DEV( 0x12d1, 0x1405, 0x0000, 0x0000, 1656UNUSUAL_DEV( 0x12d1, 0x1403, 0x0000, 0x0000,
1657 "HUAWEI MOBILE", 1657 "HUAWEI MOBILE",
1658 "Mass Storage", 1658 "Mass Storage",
1659 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1659 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1660 0), 1660 0),
1661UNUSUAL_DEV( 0x12d1, 0x1406, 0x0000, 0x0000, 1661UNUSUAL_DEV( 0x12d1, 0x1404, 0x0000, 0x0000,
1662 "HUAWEI MOBILE", 1662 "HUAWEI MOBILE",
1663 "Mass Storage", 1663 "Mass Storage",
1664 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1664 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1665 0), 1665 0),
1666UNUSUAL_DEV( 0x12d1, 0x1408, 0x0000, 0x0000, 1666UNUSUAL_DEV( 0x12d1, 0x1405, 0x0000, 0x0000,
1667 "HUAWEI MOBILE", 1667 "HUAWEI MOBILE",
1668 "Mass Storage", 1668 "Mass Storage",
1669 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1669 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1670 0), 1670 0),
1671UNUSUAL_DEV( 0x12d1, 0x1409, 0x0000, 0x0000, 1671UNUSUAL_DEV( 0x12d1, 0x1406, 0x0000, 0x0000,
1672 "HUAWEI MOBILE", 1672 "HUAWEI MOBILE",
1673 "Mass Storage", 1673 "Mass Storage",
1674 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1674 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1675 0), 1675 0),
1676UNUSUAL_DEV( 0x12d1, 0x1410, 0x0000, 0x0000, 1676UNUSUAL_DEV( 0x12d1, 0x1407, 0x0000, 0x0000,
1677 "HUAWEI MOBILE", 1677 "HUAWEI MOBILE",
1678 "Mass Storage", 1678 "Mass Storage",
1679 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1679 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1680 0), 1680 0),
1681UNUSUAL_DEV( 0x12d1, 0x1411, 0x0000, 0x0000, 1681UNUSUAL_DEV( 0x12d1, 0x1408, 0x0000, 0x0000,
1682 "HUAWEI MOBILE", 1682 "HUAWEI MOBILE",
1683 "Mass Storage", 1683 "Mass Storage",
1684 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1684 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1685 0), 1685 0),
1686UNUSUAL_DEV( 0x12d1, 0x1412, 0x0000, 0x0000, 1686UNUSUAL_DEV( 0x12d1, 0x1409, 0x0000, 0x0000,
1687 "HUAWEI MOBILE", 1687 "HUAWEI MOBILE",
1688 "Mass Storage", 1688 "Mass Storage",
1689 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1689 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1690 0), 1690 0),
1691UNUSUAL_DEV( 0x12d1, 0x1413, 0x0000, 0x0000, 1691UNUSUAL_DEV( 0x12d1, 0x140A, 0x0000, 0x0000,
1692 "HUAWEI MOBILE", 1692 "HUAWEI MOBILE",
1693 "Mass Storage", 1693 "Mass Storage",
1694 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1694 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1695 0), 1695 0),
1696UNUSUAL_DEV( 0x12d1, 0x1414, 0x0000, 0x0000, 1696UNUSUAL_DEV( 0x12d1, 0x140B, 0x0000, 0x0000,
1697 "HUAWEI MOBILE", 1697 "HUAWEI MOBILE",
1698 "Mass Storage", 1698 "Mass Storage",
1699 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1699 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1700 0), 1700 0),
1701UNUSUAL_DEV( 0x12d1, 0x1415, 0x0000, 0x0000, 1701UNUSUAL_DEV( 0x12d1, 0x140C, 0x0000, 0x0000,
1702 "HUAWEI MOBILE", 1702 "HUAWEI MOBILE",
1703 "Mass Storage", 1703 "Mass Storage",
1704 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1704 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1705 0), 1705 0),
1706UNUSUAL_DEV( 0x12d1, 0x1416, 0x0000, 0x0000, 1706UNUSUAL_DEV( 0x12d1, 0x140D, 0x0000, 0x0000,
1707 "HUAWEI MOBILE", 1707 "HUAWEI MOBILE",
1708 "Mass Storage", 1708 "Mass Storage",
1709 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1709 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1710 0), 1710 0),
1711UNUSUAL_DEV( 0x12d1, 0x1417, 0x0000, 0x0000, 1711UNUSUAL_DEV( 0x12d1, 0x140E, 0x0000, 0x0000,
1712 "HUAWEI MOBILE", 1712 "HUAWEI MOBILE",
1713 "Mass Storage", 1713 "Mass Storage",
1714 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1714 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1715 0), 1715 0),
1716UNUSUAL_DEV( 0x12d1, 0x1418, 0x0000, 0x0000, 1716UNUSUAL_DEV( 0x12d1, 0x140F, 0x0000, 0x0000,
1717 "HUAWEI MOBILE", 1717 "HUAWEI MOBILE",
1718 "Mass Storage", 1718 "Mass Storage",
1719 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1719 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1720 0), 1720 0),
1721UNUSUAL_DEV( 0x12d1, 0x1419, 0x0000, 0x0000, 1721UNUSUAL_DEV( 0x12d1, 0x1410, 0x0000, 0x0000,
1722 "HUAWEI MOBILE",
1723 "Mass Storage",
1724 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1725 0),
1726UNUSUAL_DEV( 0x12d1, 0x1411, 0x0000, 0x0000,
1727 "HUAWEI MOBILE",
1728 "Mass Storage",
1729 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1730 0),
1731UNUSUAL_DEV( 0x12d1, 0x1412, 0x0000, 0x0000,
1732 "HUAWEI MOBILE",
1733 "Mass Storage",
1734 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1735 0),
1736UNUSUAL_DEV( 0x12d1, 0x1413, 0x0000, 0x0000,
1737 "HUAWEI MOBILE",
1738 "Mass Storage",
1739 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1740 0),
1741UNUSUAL_DEV( 0x12d1, 0x1414, 0x0000, 0x0000,
1742 "HUAWEI MOBILE",
1743 "Mass Storage",
1744 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1745 0),
1746UNUSUAL_DEV( 0x12d1, 0x1415, 0x0000, 0x0000,
1747 "HUAWEI MOBILE",
1748 "Mass Storage",
1749 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1750 0),
1751UNUSUAL_DEV( 0x12d1, 0x1416, 0x0000, 0x0000,
1752 "HUAWEI MOBILE",
1753 "Mass Storage",
1754 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1755 0),
1756UNUSUAL_DEV( 0x12d1, 0x1417, 0x0000, 0x0000,
1757 "HUAWEI MOBILE",
1758 "Mass Storage",
1759 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1760 0),
1761UNUSUAL_DEV( 0x12d1, 0x1418, 0x0000, 0x0000,
1762 "HUAWEI MOBILE",
1763 "Mass Storage",
1764 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1765 0),
1766UNUSUAL_DEV( 0x12d1, 0x1419, 0x0000, 0x0000,
1767 "HUAWEI MOBILE",
1768 "Mass Storage",
1769 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1770 0),
1771UNUSUAL_DEV( 0x12d1, 0x141A, 0x0000, 0x0000,
1772 "HUAWEI MOBILE",
1773 "Mass Storage",
1774 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1775 0),
1776UNUSUAL_DEV( 0x12d1, 0x141B, 0x0000, 0x0000,
1777 "HUAWEI MOBILE",
1778 "Mass Storage",
1779 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1780 0),
1781UNUSUAL_DEV( 0x12d1, 0x141C, 0x0000, 0x0000,
1782 "HUAWEI MOBILE",
1783 "Mass Storage",
1784 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1785 0),
1786UNUSUAL_DEV( 0x12d1, 0x141D, 0x0000, 0x0000,
1787 "HUAWEI MOBILE",
1788 "Mass Storage",
1789 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1790 0),
1791UNUSUAL_DEV( 0x12d1, 0x141E, 0x0000, 0x0000,
1792 "HUAWEI MOBILE",
1793 "Mass Storage",
1794 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1795 0),
1796UNUSUAL_DEV( 0x12d1, 0x141F, 0x0000, 0x0000,
1797 "HUAWEI MOBILE",
1798 "Mass Storage",
1799 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1800 0),
1801UNUSUAL_DEV( 0x12d1, 0x1420, 0x0000, 0x0000,
1802 "HUAWEI MOBILE",
1803 "Mass Storage",
1804 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1805 0),
1806UNUSUAL_DEV( 0x12d1, 0x1421, 0x0000, 0x0000,
1807 "HUAWEI MOBILE",
1808 "Mass Storage",
1809 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1810 0),
1811UNUSUAL_DEV( 0x12d1, 0x1422, 0x0000, 0x0000,
1812 "HUAWEI MOBILE",
1813 "Mass Storage",
1814 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1815 0),
1816UNUSUAL_DEV( 0x12d1, 0x1423, 0x0000, 0x0000,
1817 "HUAWEI MOBILE",
1818 "Mass Storage",
1819 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1820 0),
1821UNUSUAL_DEV( 0x12d1, 0x1424, 0x0000, 0x0000,
1822 "HUAWEI MOBILE",
1823 "Mass Storage",
1824 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1825 0),
1826UNUSUAL_DEV( 0x12d1, 0x1425, 0x0000, 0x0000,
1827 "HUAWEI MOBILE",
1828 "Mass Storage",
1829 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1830 0),
1831UNUSUAL_DEV( 0x12d1, 0x1426, 0x0000, 0x0000,
1832 "HUAWEI MOBILE",
1833 "Mass Storage",
1834 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1835 0),
1836UNUSUAL_DEV( 0x12d1, 0x1427, 0x0000, 0x0000,
1837 "HUAWEI MOBILE",
1838 "Mass Storage",
1839 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1840 0),
1841UNUSUAL_DEV( 0x12d1, 0x1428, 0x0000, 0x0000,
1842 "HUAWEI MOBILE",
1843 "Mass Storage",
1844 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1845 0),
1846UNUSUAL_DEV( 0x12d1, 0x1429, 0x0000, 0x0000,
1847 "HUAWEI MOBILE",
1848 "Mass Storage",
1849 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1850 0),
1851UNUSUAL_DEV( 0x12d1, 0x142A, 0x0000, 0x0000,
1852 "HUAWEI MOBILE",
1853 "Mass Storage",
1854 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1855 0),
1856UNUSUAL_DEV( 0x12d1, 0x142B, 0x0000, 0x0000,
1857 "HUAWEI MOBILE",
1858 "Mass Storage",
1859 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1860 0),
1861UNUSUAL_DEV( 0x12d1, 0x142C, 0x0000, 0x0000,
1862 "HUAWEI MOBILE",
1863 "Mass Storage",
1864 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1865 0),
1866UNUSUAL_DEV( 0x12d1, 0x142D, 0x0000, 0x0000,
1867 "HUAWEI MOBILE",
1868 "Mass Storage",
1869 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1870 0),
1871UNUSUAL_DEV( 0x12d1, 0x142E, 0x0000, 0x0000,
1872 "HUAWEI MOBILE",
1873 "Mass Storage",
1874 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1875 0),
1876UNUSUAL_DEV( 0x12d1, 0x142F, 0x0000, 0x0000,
1877 "HUAWEI MOBILE",
1878 "Mass Storage",
1879 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1880 0),
1881UNUSUAL_DEV( 0x12d1, 0x1430, 0x0000, 0x0000,
1882 "HUAWEI MOBILE",
1883 "Mass Storage",
1884 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1885 0),
1886UNUSUAL_DEV( 0x12d1, 0x1431, 0x0000, 0x0000,
1887 "HUAWEI MOBILE",
1888 "Mass Storage",
1889 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1890 0),
1891UNUSUAL_DEV( 0x12d1, 0x1432, 0x0000, 0x0000,
1892 "HUAWEI MOBILE",
1893 "Mass Storage",
1894 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1895 0),
1896UNUSUAL_DEV( 0x12d1, 0x1433, 0x0000, 0x0000,
1897 "HUAWEI MOBILE",
1898 "Mass Storage",
1899 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1900 0),
1901UNUSUAL_DEV( 0x12d1, 0x1434, 0x0000, 0x0000,
1902 "HUAWEI MOBILE",
1903 "Mass Storage",
1904 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1905 0),
1906UNUSUAL_DEV( 0x12d1, 0x1435, 0x0000, 0x0000,
1907 "HUAWEI MOBILE",
1908 "Mass Storage",
1909 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1910 0),
1911UNUSUAL_DEV( 0x12d1, 0x1436, 0x0000, 0x0000,
1912 "HUAWEI MOBILE",
1913 "Mass Storage",
1914 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1915 0),
1916UNUSUAL_DEV( 0x12d1, 0x1437, 0x0000, 0x0000,
1917 "HUAWEI MOBILE",
1918 "Mass Storage",
1919 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1920 0),
1921UNUSUAL_DEV( 0x12d1, 0x1438, 0x0000, 0x0000,
1922 "HUAWEI MOBILE",
1923 "Mass Storage",
1924 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1925 0),
1926UNUSUAL_DEV( 0x12d1, 0x1439, 0x0000, 0x0000,
1927 "HUAWEI MOBILE",
1928 "Mass Storage",
1929 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1930 0),
1931UNUSUAL_DEV( 0x12d1, 0x143A, 0x0000, 0x0000,
1932 "HUAWEI MOBILE",
1933 "Mass Storage",
1934 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1935 0),
1936UNUSUAL_DEV( 0x12d1, 0x143B, 0x0000, 0x0000,
1937 "HUAWEI MOBILE",
1938 "Mass Storage",
1939 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1940 0),
1941UNUSUAL_DEV( 0x12d1, 0x143C, 0x0000, 0x0000,
1942 "HUAWEI MOBILE",
1943 "Mass Storage",
1944 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1945 0),
1946UNUSUAL_DEV( 0x12d1, 0x143D, 0x0000, 0x0000,
1947 "HUAWEI MOBILE",
1948 "Mass Storage",
1949 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1950 0),
1951UNUSUAL_DEV( 0x12d1, 0x143E, 0x0000, 0x0000,
1952 "HUAWEI MOBILE",
1953 "Mass Storage",
1954 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
1955 0),
1956UNUSUAL_DEV( 0x12d1, 0x143F, 0x0000, 0x0000,
1722 "HUAWEI MOBILE", 1957 "HUAWEI MOBILE",
1723 "Mass Storage", 1958 "Mass Storage",
1724 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init, 1959 US_SC_DEVICE, US_PR_DEVICE, usb_stor_huawei_e220_init,
@@ -1745,6 +1980,15 @@ UNUSUAL_DEV( 0x14cd, 0x6600, 0x0201, 0x0201,
1745 US_SC_DEVICE, US_PR_DEVICE, NULL, 1980 US_SC_DEVICE, US_PR_DEVICE, NULL,
1746 US_FL_IGNORE_RESIDUE ), 1981 US_FL_IGNORE_RESIDUE ),
1747 1982
1983/* Reported by Alexandre Oliva <oliva@lsd.ic.unicamp.br>
1984 * JMicron responds to USN and several other SCSI ioctls with a
1985 * residue that causes subsequent I/O requests to fail. */
1986UNUSUAL_DEV( 0x152d, 0x2329, 0x0100, 0x0100,
1987 "JMicron",
1988 "USB to ATA/ATAPI Bridge",
1989 US_SC_DEVICE, US_PR_DEVICE, NULL,
1990 US_FL_IGNORE_RESIDUE ),
1991
1748/* Reported by Robert Schedel <r.schedel@yahoo.de> 1992/* Reported by Robert Schedel <r.schedel@yahoo.de>
1749 * Note: this is a 'super top' device like the above 14cd/6600 device */ 1993 * Note: this is a 'super top' device like the above 14cd/6600 device */
1750UNUSUAL_DEV( 0x1652, 0x6600, 0x0201, 0x0201, 1994UNUSUAL_DEV( 0x1652, 0x6600, 0x0201, 0x0201,
@@ -1818,6 +2062,15 @@ UNUSUAL_DEV( 0x2770, 0x915d, 0x0010, 0x0010,
1818 US_SC_DEVICE, US_PR_DEVICE, NULL, 2062 US_SC_DEVICE, US_PR_DEVICE, NULL,
1819 US_FL_FIX_CAPACITY ), 2063 US_FL_FIX_CAPACITY ),
1820 2064
2065/* Reported by Frederic Marchal <frederic.marchal@wowcompany.com>
2066 * Mio Moov 330
2067 */
2068UNUSUAL_DEV( 0x3340, 0xffff, 0x0000, 0x0000,
2069 "Mitac",
2070 "Mio DigiWalker USB Sync",
2071 US_SC_DEVICE,US_PR_DEVICE,NULL,
2072 US_FL_MAX_SECTORS_64 ),
2073
1821/* Reported by Andrey Rahmatullin <wrar@altlinux.org> */ 2074/* Reported by Andrey Rahmatullin <wrar@altlinux.org> */
1822UNUSUAL_DEV( 0x4102, 0x1020, 0x0100, 0x0100, 2075UNUSUAL_DEV( 0x4102, 0x1020, 0x0100, 0x0100,
1823 "iRiver", 2076 "iRiver",
diff --git a/drivers/usb/wusbcore/Kconfig b/drivers/usb/wusbcore/Kconfig
new file mode 100644
index 000000000000..eb09a0a14a80
--- /dev/null
+++ b/drivers/usb/wusbcore/Kconfig
@@ -0,0 +1,41 @@
1#
2# Wireless USB Core configuration
3#
4config USB_WUSB
5 tristate "Enable Wireless USB extensions (EXPERIMENTAL)"
6 depends on EXPERIMENTAL
7 depends on USB
8 select UWB
9 select CRYPTO
10 select CRYPTO_BLKCIPHER
11 select CRYPTO_CBC
12 select CRYPTO_MANAGER
13 select CRYPTO_AES
14 help
15 Enable the host-side support for Wireless USB.
16
17 To compile this support select Y (built in). It is safe to
18 select even if you don't have the hardware.
19
20config USB_WUSB_CBAF
21 tristate "Support WUSB Cable Based Association (CBA)"
22 depends on USB
23 help
24 Some WUSB devices support Cable Based Association. It's used to
25 enable the secure communication between the host and the
26 device.
27
28 Enable this option if your WUSB device must to be connected
29 via wired USB before establishing a wireless link.
30
31 It is safe to select even if you don't have a compatible
32 hardware.
33
34config USB_WUSB_CBAF_DEBUG
35 bool "Enable CBA debug messages"
36 depends on USB_WUSB_CBAF
37 help
38 Say Y here if you want the CBA to produce a bunch of debug messages
39 to the system log. Select this if you are having a problem with
40 CBA support and want to see more of what is going on.
41
diff --git a/drivers/usb/wusbcore/Makefile b/drivers/usb/wusbcore/Makefile
new file mode 100644
index 000000000000..75f1ade66258
--- /dev/null
+++ b/drivers/usb/wusbcore/Makefile
@@ -0,0 +1,26 @@
1obj-$(CONFIG_USB_WUSB) += wusbcore.o
2obj-$(CONFIG_USB_HWA_HCD) += wusb-wa.o
3obj-$(CONFIG_USB_WUSB_CBAF) += wusb-cbaf.o
4
5
6wusbcore-objs := \
7 crypto.o \
8 devconnect.o \
9 dev-sysfs.o \
10 mmc.o \
11 pal.o \
12 rh.o \
13 reservation.o \
14 security.o \
15 wusbhc.o
16
17wusb-cbaf-objs := cbaf.o
18
19wusb-wa-objs := wa-hc.o \
20 wa-nep.o \
21 wa-rpipe.o \
22 wa-xfer.o
23
24ifeq ($(CONFIG_USB_WUSB_CBAF_DEBUG),y)
25EXTRA_CFLAGS += -DDEBUG
26endif
diff --git a/drivers/usb/wusbcore/cbaf.c b/drivers/usb/wusbcore/cbaf.c
new file mode 100644
index 000000000000..ab4788d1785a
--- /dev/null
+++ b/drivers/usb/wusbcore/cbaf.c
@@ -0,0 +1,673 @@
1/*
2 * Wireless USB - Cable Based Association
3 *
4 *
5 * Copyright (C) 2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License version
11 * 2 as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21 * 02110-1301, USA.
22 *
23 *
24 * WUSB devices have to be paired (associated in WUSB lingo) so
25 * that they can connect to the system.
26 *
27 * One way of pairing is using CBA-Cable Based Association. First
28 * time you plug the device with a cable, association is done between
29 * host and device and subsequent times, you can connect wirelessly
30 * without having to associate again. That's the idea.
31 *
32 * This driver does nothing Earth shattering. It just provides an
33 * interface to chat with the wire-connected device so we can get a
34 * CDID (device ID) that might have been previously associated to a
35 * CHID (host ID) and to set up a new <CHID,CDID,CK> triplet
36 * (connection context), with the CK being the secret, or connection
37 * key. This is the pairing data.
38 *
39 * When a device with the CBA capability connects, the probe routine
40 * just creates a bunch of sysfs files that a user space enumeration
41 * manager uses to allow it to connect wirelessly to the system or not.
42 *
43 * The process goes like this:
44 *
45 * 1. Device plugs, cbaf is loaded, notifications happen.
46 *
47 * 2. The connection manager (CM) sees a device with CBAF capability
48 * (the wusb_chid etc. files in /sys/devices/blah/OURDEVICE).
49 *
50 * 3. The CM writes the host name, supported band groups, and the CHID
51 * (host ID) into the wusb_host_name, wusb_host_band_groups and
52 * wusb_chid files. These get sent to the device and the CDID (if
53 * any) for this host is requested.
54 *
55 * 4. The CM can verify that the device's supported band groups
56 * (wusb_device_band_groups) are compatible with the host.
57 *
58 * 5. The CM reads the wusb_cdid file.
59 *
60 * 6. The CM looks up its database
61 *
62 * 6.1 If it has a matching CHID,CDID entry, the device has been
63 * authorized before (paired) and nothing further needs to be
64 * done.
65 *
66 * 6.2 If the CDID is zero (or the CM doesn't find a matching CDID in
67 * its database), the device is assumed to be not known. The CM
68 * may associate the host with device by: writing a randomly
69 * generated CDID to wusb_cdid and then a random CK to wusb_ck
70 * (this uploads the new CC to the device).
71 *
72 * CMD may choose to prompt the user before associating with a new
73 * device.
74 *
75 * 7. Device is unplugged.
76 *
77 * When the device tries to connect wirelessly, it will present its
78 * CDID to the WUSB host controller. The CM will query the
79 * database. If the CHID/CDID pair found, it will (with a 4-way
80 * handshake) challenge the device to demonstrate it has the CK secret
81 * key (from our database) without actually exchanging it. Once
82 * satisfied, crypto keys are derived from the CK, the device is
83 * connected and all communication is encrypted.
84 *
85 * References:
86 * [WUSB-AM] Association Models Supplement to the Certified Wireless
87 * Universal Serial Bus Specification, version 1.0.
88 */
89#include <linux/module.h>
90#include <linux/ctype.h>
91#include <linux/version.h>
92#include <linux/usb.h>
93#include <linux/interrupt.h>
94#include <linux/delay.h>
95#include <linux/random.h>
96#include <linux/mutex.h>
97#include <linux/uwb.h>
98#include <linux/usb/wusb.h>
99#include <linux/usb/association.h>
100
101#define CBA_NAME_LEN 0x40 /* [WUSB-AM] table 4-7 */
102
103/* An instance of a Cable-Based-Association-Framework device */
104struct cbaf {
105 struct usb_device *usb_dev;
106 struct usb_interface *usb_iface;
107 void *buffer;
108 size_t buffer_size;
109
110 struct wusb_ckhdid chid;
111 char host_name[CBA_NAME_LEN];
112 u16 host_band_groups;
113
114 struct wusb_ckhdid cdid;
115 char device_name[CBA_NAME_LEN];
116 u16 device_band_groups;
117
118 struct wusb_ckhdid ck;
119};
120
121/*
122 * Verify that a CBAF USB-interface has what we need
123 *
124 * According to [WUSB-AM], CBA devices should provide at least two
125 * interfaces:
126 * - RETRIEVE_HOST_INFO
127 * - ASSOCIATE
128 *
129 * If the device doesn't provide these interfaces, we do not know how
130 * to deal with it.
131 */
132static int cbaf_check(struct cbaf *cbaf)
133{
134 int result;
135 struct device *dev = &cbaf->usb_iface->dev;
136 struct wusb_cbaf_assoc_info *assoc_info;
137 struct wusb_cbaf_assoc_request *assoc_request;
138 size_t assoc_size;
139 void *itr, *top;
140 int ar_rhi = 0, ar_assoc = 0;
141
142 result = usb_control_msg(
143 cbaf->usb_dev, usb_rcvctrlpipe(cbaf->usb_dev, 0),
144 CBAF_REQ_GET_ASSOCIATION_INFORMATION,
145 USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
146 0, cbaf->usb_iface->cur_altsetting->desc.bInterfaceNumber,
147 cbaf->buffer, cbaf->buffer_size, 1000 /* FIXME: arbitrary */);
148 if (result < 0) {
149 dev_err(dev, "Cannot get available association types: %d\n",
150 result);
151 return result;
152 }
153
154 assoc_info = cbaf->buffer;
155 if (result < sizeof(*assoc_info)) {
156 dev_err(dev, "Not enough data to decode association info "
157 "header (%zu vs %zu bytes required)\n",
158 (size_t)result, sizeof(*assoc_info));
159 return result;
160 }
161
162 assoc_size = le16_to_cpu(assoc_info->Length);
163 if (result < assoc_size) {
164 dev_err(dev, "Not enough data to decode association info "
165 "(%zu vs %zu bytes required)\n",
166 (size_t)assoc_size, sizeof(*assoc_info));
167 return result;
168 }
169 /*
170 * From now on, we just verify, but won't error out unless we
171 * don't find the AR_TYPE_WUSB_{RETRIEVE_HOST_INFO,ASSOCIATE}
172 * types.
173 */
174 itr = cbaf->buffer + sizeof(*assoc_info);
175 top = cbaf->buffer + assoc_size;
176 dev_dbg(dev, "Found %u association requests (%zu bytes)\n",
177 assoc_info->NumAssociationRequests, assoc_size);
178
179 while (itr < top) {
180 u16 ar_type, ar_subtype;
181 u32 ar_size;
182 const char *ar_name;
183
184 assoc_request = itr;
185
186 if (top - itr < sizeof(*assoc_request)) {
187 dev_err(dev, "Not enough data to decode associaton "
188 "request (%zu vs %zu bytes needed)\n",
189 top - itr, sizeof(*assoc_request));
190 break;
191 }
192
193 ar_type = le16_to_cpu(assoc_request->AssociationTypeId);
194 ar_subtype = le16_to_cpu(assoc_request->AssociationSubTypeId);
195 ar_size = le32_to_cpu(assoc_request->AssociationTypeInfoSize);
196 ar_name = "unknown";
197
198 switch (ar_type) {
199 case AR_TYPE_WUSB:
200 /* Verify we have what is mandated by [WUSB-AM]. */
201 switch (ar_subtype) {
202 case AR_TYPE_WUSB_RETRIEVE_HOST_INFO:
203 ar_name = "RETRIEVE_HOST_INFO";
204 ar_rhi = 1;
205 break;
206 case AR_TYPE_WUSB_ASSOCIATE:
207 /* send assoc data */
208 ar_name = "ASSOCIATE";
209 ar_assoc = 1;
210 break;
211 };
212 break;
213 };
214
215 dev_dbg(dev, "Association request #%02u: 0x%04x/%04x "
216 "(%zu bytes): %s\n",
217 assoc_request->AssociationDataIndex, ar_type,
218 ar_subtype, (size_t)ar_size, ar_name);
219
220 itr += sizeof(*assoc_request);
221 }
222
223 if (!ar_rhi) {
224 dev_err(dev, "Missing RETRIEVE_HOST_INFO association "
225 "request\n");
226 return -EINVAL;
227 }
228 if (!ar_assoc) {
229 dev_err(dev, "Missing ASSOCIATE association request\n");
230 return -EINVAL;
231 }
232
233 return 0;
234}
235
236static const struct wusb_cbaf_host_info cbaf_host_info_defaults = {
237 .AssociationTypeId_hdr = WUSB_AR_AssociationTypeId,
238 .AssociationTypeId = cpu_to_le16(AR_TYPE_WUSB),
239 .AssociationSubTypeId_hdr = WUSB_AR_AssociationSubTypeId,
240 .AssociationSubTypeId = cpu_to_le16(AR_TYPE_WUSB_RETRIEVE_HOST_INFO),
241 .CHID_hdr = WUSB_AR_CHID,
242 .LangID_hdr = WUSB_AR_LangID,
243 .HostFriendlyName_hdr = WUSB_AR_HostFriendlyName,
244};
245
246/* Send WUSB host information (CHID and name) to a CBAF device */
247static int cbaf_send_host_info(struct cbaf *cbaf)
248{
249 struct wusb_cbaf_host_info *hi;
250 size_t name_len;
251 size_t hi_size;
252
253 hi = cbaf->buffer;
254 memset(hi, 0, sizeof(*hi));
255 *hi = cbaf_host_info_defaults;
256 hi->CHID = cbaf->chid;
257 hi->LangID = 0; /* FIXME: I guess... */
258 strlcpy(hi->HostFriendlyName, cbaf->host_name, CBA_NAME_LEN);
259 name_len = strlen(cbaf->host_name);
260 hi->HostFriendlyName_hdr.len = cpu_to_le16(name_len);
261 hi_size = sizeof(*hi) + name_len;
262
263 return usb_control_msg(cbaf->usb_dev, usb_sndctrlpipe(cbaf->usb_dev, 0),
264 CBAF_REQ_SET_ASSOCIATION_RESPONSE,
265 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
266 0x0101,
267 cbaf->usb_iface->cur_altsetting->desc.bInterfaceNumber,
268 hi, hi_size, 1000 /* FIXME: arbitrary */);
269}
270
271/*
272 * Get device's information (CDID) associated to CHID
273 *
274 * The device will return it's information (CDID, name, bandgroups)
275 * associated to the CHID we have set before, or 0 CDID and default
276 * name and bandgroup if no CHID set or unknown.
277 */
278static int cbaf_cdid_get(struct cbaf *cbaf)
279{
280 int result;
281 struct device *dev = &cbaf->usb_iface->dev;
282 struct wusb_cbaf_device_info *di;
283 size_t needed;
284
285 di = cbaf->buffer;
286 result = usb_control_msg(
287 cbaf->usb_dev, usb_rcvctrlpipe(cbaf->usb_dev, 0),
288 CBAF_REQ_GET_ASSOCIATION_REQUEST,
289 USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
290 0x0200, cbaf->usb_iface->cur_altsetting->desc.bInterfaceNumber,
291 di, cbaf->buffer_size, 1000 /* FIXME: arbitrary */);
292 if (result < 0) {
293 dev_err(dev, "Cannot request device information: %d\n", result);
294 return result;
295 }
296
297 needed = result < sizeof(*di) ? sizeof(*di) : le32_to_cpu(di->Length);
298 if (result < needed) {
299 dev_err(dev, "Not enough data in DEVICE_INFO reply (%zu vs "
300 "%zu bytes needed)\n", (size_t)result, needed);
301 return result;
302 }
303
304 strlcpy(cbaf->device_name, di->DeviceFriendlyName, CBA_NAME_LEN);
305 cbaf->cdid = di->CDID;
306 cbaf->device_band_groups = le16_to_cpu(di->BandGroups);
307
308 return 0;
309}
310
311static ssize_t cbaf_wusb_chid_show(struct device *dev,
312 struct device_attribute *attr,
313 char *buf)
314{
315 struct usb_interface *iface = to_usb_interface(dev);
316 struct cbaf *cbaf = usb_get_intfdata(iface);
317 char pr_chid[WUSB_CKHDID_STRSIZE];
318
319 ckhdid_printf(pr_chid, sizeof(pr_chid), &cbaf->chid);
320 return scnprintf(buf, PAGE_SIZE, "%s\n", pr_chid);
321}
322
323static ssize_t cbaf_wusb_chid_store(struct device *dev,
324 struct device_attribute *attr,
325 const char *buf, size_t size)
326{
327 ssize_t result;
328 struct usb_interface *iface = to_usb_interface(dev);
329 struct cbaf *cbaf = usb_get_intfdata(iface);
330
331 result = sscanf(buf,
332 "%02hhx %02hhx %02hhx %02hhx "
333 "%02hhx %02hhx %02hhx %02hhx "
334 "%02hhx %02hhx %02hhx %02hhx "
335 "%02hhx %02hhx %02hhx %02hhx",
336 &cbaf->chid.data[0] , &cbaf->chid.data[1],
337 &cbaf->chid.data[2] , &cbaf->chid.data[3],
338 &cbaf->chid.data[4] , &cbaf->chid.data[5],
339 &cbaf->chid.data[6] , &cbaf->chid.data[7],
340 &cbaf->chid.data[8] , &cbaf->chid.data[9],
341 &cbaf->chid.data[10], &cbaf->chid.data[11],
342 &cbaf->chid.data[12], &cbaf->chid.data[13],
343 &cbaf->chid.data[14], &cbaf->chid.data[15]);
344
345 if (result != 16)
346 return -EINVAL;
347
348 result = cbaf_send_host_info(cbaf);
349 if (result < 0)
350 return result;
351 result = cbaf_cdid_get(cbaf);
352 if (result < 0)
353 return -result;
354 return size;
355}
356static DEVICE_ATTR(wusb_chid, 0600, cbaf_wusb_chid_show, cbaf_wusb_chid_store);
357
358static ssize_t cbaf_wusb_host_name_show(struct device *dev,
359 struct device_attribute *attr,
360 char *buf)
361{
362 struct usb_interface *iface = to_usb_interface(dev);
363 struct cbaf *cbaf = usb_get_intfdata(iface);
364
365 return scnprintf(buf, PAGE_SIZE, "%s\n", cbaf->host_name);
366}
367
368static ssize_t cbaf_wusb_host_name_store(struct device *dev,
369 struct device_attribute *attr,
370 const char *buf, size_t size)
371{
372 ssize_t result;
373 struct usb_interface *iface = to_usb_interface(dev);
374 struct cbaf *cbaf = usb_get_intfdata(iface);
375
376 result = sscanf(buf, "%63s", cbaf->host_name);
377 if (result != 1)
378 return -EINVAL;
379
380 return size;
381}
382static DEVICE_ATTR(wusb_host_name, 0600, cbaf_wusb_host_name_show,
383 cbaf_wusb_host_name_store);
384
385static ssize_t cbaf_wusb_host_band_groups_show(struct device *dev,
386 struct device_attribute *attr,
387 char *buf)
388{
389 struct usb_interface *iface = to_usb_interface(dev);
390 struct cbaf *cbaf = usb_get_intfdata(iface);
391
392 return scnprintf(buf, PAGE_SIZE, "0x%04x\n", cbaf->host_band_groups);
393}
394
395static ssize_t cbaf_wusb_host_band_groups_store(struct device *dev,
396 struct device_attribute *attr,
397 const char *buf, size_t size)
398{
399 ssize_t result;
400 struct usb_interface *iface = to_usb_interface(dev);
401 struct cbaf *cbaf = usb_get_intfdata(iface);
402 u16 band_groups = 0;
403
404 result = sscanf(buf, "%04hx", &band_groups);
405 if (result != 1)
406 return -EINVAL;
407
408 cbaf->host_band_groups = band_groups;
409
410 return size;
411}
412
413static DEVICE_ATTR(wusb_host_band_groups, 0600,
414 cbaf_wusb_host_band_groups_show,
415 cbaf_wusb_host_band_groups_store);
416
/*
 * Association-attribute headers for the device-info block; the data
 * fields (CDID, band groups, friendly name) are filled in at runtime.
 */
static const struct wusb_cbaf_device_info cbaf_device_info_defaults = {
	.Length_hdr = WUSB_AR_Length,
	.CDID_hdr = WUSB_AR_CDID,
	.BandGroups_hdr = WUSB_AR_BandGroups,
	.LangID_hdr = WUSB_AR_LangID,
	.DeviceFriendlyName_hdr = WUSB_AR_DeviceFriendlyName,
};
424
425static ssize_t cbaf_wusb_cdid_show(struct device *dev,
426 struct device_attribute *attr, char *buf)
427{
428 struct usb_interface *iface = to_usb_interface(dev);
429 struct cbaf *cbaf = usb_get_intfdata(iface);
430 char pr_cdid[WUSB_CKHDID_STRSIZE];
431
432 ckhdid_printf(pr_cdid, sizeof(pr_cdid), &cbaf->cdid);
433 return scnprintf(buf, PAGE_SIZE, "%s\n", pr_cdid);
434}
435
/*
 * Store a new CDID (16 space-separated hex octets).
 *
 * The value is parsed into a local first so cbaf->cdid is only
 * overwritten once the whole string has parsed successfully.
 */
static ssize_t cbaf_wusb_cdid_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t size)
{
	ssize_t result;
	struct usb_interface *iface = to_usb_interface(dev);
	struct cbaf *cbaf = usb_get_intfdata(iface);
	struct wusb_ckhdid cdid;

	result = sscanf(buf,
			"%02hhx %02hhx %02hhx %02hhx "
			"%02hhx %02hhx %02hhx %02hhx "
			"%02hhx %02hhx %02hhx %02hhx "
			"%02hhx %02hhx %02hhx %02hhx",
			&cdid.data[0] , &cdid.data[1],
			&cdid.data[2] , &cdid.data[3],
			&cdid.data[4] , &cdid.data[5],
			&cdid.data[6] , &cdid.data[7],
			&cdid.data[8] , &cdid.data[9],
			&cdid.data[10], &cdid.data[11],
			&cdid.data[12], &cdid.data[13],
			&cdid.data[14], &cdid.data[15]);
	if (result != 16)
		return -EINVAL;

	cbaf->cdid = cdid;

	return size;
}
465static DEVICE_ATTR(wusb_cdid, 0600, cbaf_wusb_cdid_show, cbaf_wusb_cdid_store);
466
467static ssize_t cbaf_wusb_device_band_groups_show(struct device *dev,
468 struct device_attribute *attr,
469 char *buf)
470{
471 struct usb_interface *iface = to_usb_interface(dev);
472 struct cbaf *cbaf = usb_get_intfdata(iface);
473
474 return scnprintf(buf, PAGE_SIZE, "0x%04x\n", cbaf->device_band_groups);
475}
476
477static DEVICE_ATTR(wusb_device_band_groups, 0600,
478 cbaf_wusb_device_band_groups_show,
479 NULL);
480
481static ssize_t cbaf_wusb_device_name_show(struct device *dev,
482 struct device_attribute *attr,
483 char *buf)
484{
485 struct usb_interface *iface = to_usb_interface(dev);
486 struct cbaf *cbaf = usb_get_intfdata(iface);
487
488 return scnprintf(buf, PAGE_SIZE, "%s\n", cbaf->device_name);
489}
490static DEVICE_ATTR(wusb_device_name, 0600, cbaf_wusb_device_name_show, NULL);
491
/*
 * Template for the Connection Context block; CHID, CDID, CK and
 * BandGroups are filled in by cbaf_cc_upload().
 */
static const struct wusb_cbaf_cc_data cbaf_cc_data_defaults = {
	.AssociationTypeId_hdr = WUSB_AR_AssociationTypeId,
	.AssociationTypeId = cpu_to_le16(AR_TYPE_WUSB),
	.AssociationSubTypeId_hdr = WUSB_AR_AssociationSubTypeId,
	.AssociationSubTypeId = cpu_to_le16(AR_TYPE_WUSB_ASSOCIATE),
	.Length_hdr = WUSB_AR_Length,
	.Length = cpu_to_le32(sizeof(struct wusb_cbaf_cc_data)),
	.ConnectionContext_hdr = WUSB_AR_ConnectionContext,
	.BandGroups_hdr = WUSB_AR_BandGroups,
};
502
/* Header-only template for the failure variant of the CC data block. */
static const struct wusb_cbaf_cc_data_fail cbaf_cc_data_fail_defaults = {
	.AssociationTypeId_hdr = WUSB_AR_AssociationTypeId,
	.AssociationSubTypeId_hdr = WUSB_AR_AssociationSubTypeId,
	.Length_hdr = WUSB_AR_Length,
	.AssociationStatus_hdr = WUSB_AR_AssociationStatus,
};
509
510/*
511 * Send a new CC to the device.
512 */
/*
 * Build the CC data block (defaults + CHID/CDID/CK/band groups) in the
 * transfer buffer and push it to the device with a class-specific
 * SET_ASSOCIATION_RESPONSE control request.
 *
 * Returns the usb_control_msg() result: >= 0 bytes transferred on
 * success, negative errno on failure.
 */
static int cbaf_cc_upload(struct cbaf *cbaf)
{
	int result;
	struct device *dev = &cbaf->usb_iface->dev;
	struct wusb_cbaf_cc_data *ccd;
	char pr_cdid[WUSB_CKHDID_STRSIZE];

	/* Assemble the CC in the kmalloc'ed buffer used for USB transfers */
	ccd = cbaf->buffer;
	*ccd = cbaf_cc_data_defaults;
	ccd->CHID = cbaf->chid;
	ccd->CDID = cbaf->cdid;
	ccd->CK = cbaf->ck;
	ccd->BandGroups = cpu_to_le16(cbaf->host_band_groups);

	dev_dbg(dev, "Trying to upload CC:\n");
	ckhdid_printf(pr_cdid, sizeof(pr_cdid), &ccd->CHID);
	dev_dbg(dev, " CHID %s\n", pr_cdid);
	ckhdid_printf(pr_cdid, sizeof(pr_cdid), &ccd->CDID);
	dev_dbg(dev, " CDID %s\n", pr_cdid);
	dev_dbg(dev, " Bandgroups 0x%04x\n", cbaf->host_band_groups);

	result = usb_control_msg(
		cbaf->usb_dev, usb_sndctrlpipe(cbaf->usb_dev, 0),
		CBAF_REQ_SET_ASSOCIATION_RESPONSE,
		USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
		0x0201, cbaf->usb_iface->cur_altsetting->desc.bInterfaceNumber,
		ccd, sizeof(*ccd), 1000 /* FIXME: arbitrary */);

	return result;
}
543
/*
 * Store the Connection Key (16 space-separated hex octets) and upload
 * the completed Connection Context to the device.
 *
 * Writing the CK is the final step of the association; it triggers
 * cbaf_cc_upload().  Returns bytes consumed or a negative errno.
 */
static ssize_t cbaf_wusb_ck_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	ssize_t result;
	struct usb_interface *iface = to_usb_interface(dev);
	struct cbaf *cbaf = usb_get_intfdata(iface);

	result = sscanf(buf,
			"%02hhx %02hhx %02hhx %02hhx "
			"%02hhx %02hhx %02hhx %02hhx "
			"%02hhx %02hhx %02hhx %02hhx "
			"%02hhx %02hhx %02hhx %02hhx",
			&cbaf->ck.data[0] , &cbaf->ck.data[1],
			&cbaf->ck.data[2] , &cbaf->ck.data[3],
			&cbaf->ck.data[4] , &cbaf->ck.data[5],
			&cbaf->ck.data[6] , &cbaf->ck.data[7],
			&cbaf->ck.data[8] , &cbaf->ck.data[9],
			&cbaf->ck.data[10], &cbaf->ck.data[11],
			&cbaf->ck.data[12], &cbaf->ck.data[13],
			&cbaf->ck.data[14], &cbaf->ck.data[15]);
	if (result != 16)
		return -EINVAL;

	result = cbaf_cc_upload(cbaf);
	if (result < 0)
		return result;

	return size;
}
574static DEVICE_ATTR(wusb_ck, 0600, NULL, cbaf_wusb_ck_store);
575
/* sysfs attributes exposed on the CBA interface device */
static struct attribute *cbaf_dev_attrs[] = {
	&dev_attr_wusb_host_name.attr,
	&dev_attr_wusb_host_band_groups.attr,
	&dev_attr_wusb_chid.attr,
	&dev_attr_wusb_cdid.attr,
	&dev_attr_wusb_device_name.attr,
	&dev_attr_wusb_device_band_groups.attr,
	&dev_attr_wusb_ck.attr,
	NULL,
};

static struct attribute_group cbaf_dev_attr_group = {
	.name = NULL,	/* we want them in the same directory */
	.attrs = cbaf_dev_attrs,
};
591
592static int cbaf_probe(struct usb_interface *iface,
593 const struct usb_device_id *id)
594{
595 struct cbaf *cbaf;
596 struct device *dev = &iface->dev;
597 int result = -ENOMEM;
598
599 cbaf = kzalloc(sizeof(*cbaf), GFP_KERNEL);
600 if (cbaf == NULL)
601 goto error_kzalloc;
602 cbaf->buffer = kmalloc(512, GFP_KERNEL);
603 if (cbaf->buffer == NULL)
604 goto error_kmalloc_buffer;
605
606 cbaf->buffer_size = 512;
607 cbaf->usb_dev = usb_get_dev(interface_to_usbdev(iface));
608 cbaf->usb_iface = usb_get_intf(iface);
609 result = cbaf_check(cbaf);
610 if (result < 0) {
611 dev_err(dev, "This device is not WUSB-CBAF compliant"
612 "and is not supported yet.\n");
613 goto error_check;
614 }
615
616 result = sysfs_create_group(&dev->kobj, &cbaf_dev_attr_group);
617 if (result < 0) {
618 dev_err(dev, "Can't register sysfs attr group: %d\n", result);
619 goto error_create_group;
620 }
621 usb_set_intfdata(iface, cbaf);
622 return 0;
623
624error_create_group:
625error_check:
626 kfree(cbaf->buffer);
627error_kmalloc_buffer:
628 kfree(cbaf);
629error_kzalloc:
630 return result;
631}
632
633static void cbaf_disconnect(struct usb_interface *iface)
634{
635 struct cbaf *cbaf = usb_get_intfdata(iface);
636 struct device *dev = &iface->dev;
637 sysfs_remove_group(&dev->kobj, &cbaf_dev_attr_group);
638 usb_set_intfdata(iface, NULL);
639 usb_put_intf(iface);
640 kfree(cbaf->buffer);
641 /* paranoia: clean up crypto keys */
642 memset(cbaf, 0, sizeof(*cbaf));
643 kfree(cbaf);
644}
645
/*
 * Match on class 0xef (miscellaneous) / subclass 0x03 / protocol 0x01,
 * the triple assigned to the Cable Based Association interface.
 */
static struct usb_device_id cbaf_id_table[] = {
	{ USB_INTERFACE_INFO(0xef, 0x03, 0x01), },
	{ },
};
MODULE_DEVICE_TABLE(usb, cbaf_id_table);

static struct usb_driver cbaf_driver = {
	.name = "wusb-cbaf",
	.id_table = cbaf_id_table,
	.probe = cbaf_probe,
	.disconnect = cbaf_disconnect,
};
658
/* Register the CBAF driver with the USB core on module load. */
static int __init cbaf_driver_init(void)
{
	return usb_register(&cbaf_driver);
}
module_init(cbaf_driver_init);

/* Unregister the driver on module unload. */
static void __exit cbaf_driver_exit(void)
{
	usb_deregister(&cbaf_driver);
}
module_exit(cbaf_driver_exit);
670
671MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
672MODULE_DESCRIPTION("Wireless USB Cable Based Association");
673MODULE_LICENSE("GPL");
diff --git a/drivers/usb/wusbcore/crypto.c b/drivers/usb/wusbcore/crypto.c
new file mode 100644
index 000000000000..c36c4389baae
--- /dev/null
+++ b/drivers/usb/wusbcore/crypto.c
@@ -0,0 +1,538 @@
1/*
2 * Ultra Wide Band
3 * AES-128 CCM Encryption
4 *
5 * Copyright (C) 2007 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * We don't do any encryption here; we use the Linux Kernel's AES-128
24 * crypto modules to construct keys and payload blocks in a way
 * defined by WUSB1.0[6]. Check the errata, as typos are patched
26 * there.
27 *
28 * Thanks a zillion to John Keys for his help and clarifications over
29 * the designed-by-a-committee text.
30 *
31 * So the idea is that there is this basic Pseudo-Random-Function
32 * defined in WUSB1.0[6.5] which is the core of everything. It works
33 * by tweaking some blocks, AES crypting them and then xoring
34 * something else with them (this seems to be called CBC(AES) -- can
35 * you tell I know jack about crypto?). So we just funnel it into the
36 * Linux Crypto API.
37 *
38 * We leave a crypto test module so we can verify that vectors match,
39 * every now and then.
40 *
41 * Block size: 16 bytes -- AES seems to do things in 'block sizes'. I
42 * am learning a lot...
43 *
44 * Conveniently, some data structures that need to be
45 * funneled through AES are...16 bytes in size!
46 */
47
48#include <linux/crypto.h>
49#include <linux/module.h>
50#include <linux/err.h>
51#include <linux/uwb.h>
52#include <linux/usb/wusb.h>
53#include <linux/scatterlist.h>
54#define D_LOCAL 0
55#include <linux/uwb/debug.h>
56
57
58/*
59 * Block of data, as understood by AES-CCM
60 *
61 * The code assumes this structure is nothing but a 16 byte array
62 * (packed in a struct to avoid common mess ups that I usually do with
63 * arrays and enforcing type checking).
64 */
struct aes_ccm_block {
	u8 data[16];		/* one 128-bit AES block */
} __attribute__((packed));
68
69/*
70 * Counter-mode Blocks (WUSB1.0[6.4])
71 *
72 * According to CCM (or so it seems), for the purpose of calculating
73 * the MIC, the message is broken in N counter-mode blocks, B0, B1,
74 * ... BN.
75 *
76 * B0 contains flags, the CCM nonce and l(m).
77 *
78 * B1 contains l(a), the MAC header, the encryption offset and padding.
79 *
80 * If EO is nonzero, additional blocks are built from payload bytes
 * until EO is exhausted (FIXME: padding to 16 bytes, I guess). The
82 * padding is not xmitted.
83 */
84
85/* WUSB1.0[T6.4] */
struct aes_ccm_b0 {
	u8 flags;			/* 0x59, per CCM spec */
	struct aes_ccm_nonce ccm_nonce;
	__be16 lm;			/* l(m); always 0 here (WUSB1.0[6.5]) */
} __attribute__((packed));
91
92/* WUSB1.0[T6.5] */
struct aes_ccm_b1 {
	__be16 la;		/* l(a): length of associated data */
	u8 mac_header[10];	/* copied from the aes_ccm_label */
	__le16 eo;
	u8 security_reserved;	/* This is always zero */
	u8 padding;		/* 0 */
} __attribute__((packed));
100
101/*
102 * Encryption Blocks (WUSB1.0[6.4.4])
103 *
104 * CCM uses Ax blocks to generate a keystream with which the MIC and
105 * the message's payload are encoded. A0 always encrypts/decrypts the
 * MIC. Ax (x>0) are used for the successive payload blocks.
107 *
108 * The x is the counter, and is increased for each block.
109 */
/* A0 crypts the MIC; Ax (x > 0) crypt the payload blocks */
struct aes_ccm_a {
	u8 flags;			/* 0x01, per CCM spec */
	struct aes_ccm_nonce ccm_nonce;
	__be16 counter;			/* Value of x */
} __attribute__((packed));
115
/*
 * XOR two equal-length byte buffers into a third.
 *
 * @_bo:  output buffer
 * @_bi1: first input buffer
 * @_bi2: second input buffer
 * @size: number of bytes to process
 */
static void bytewise_xor(void *_bo, const void *_bi1, const void *_bi2,
			 size_t size)
{
	unsigned char *out = _bo;
	const unsigned char *in1 = _bi1;
	const unsigned char *in2 = _bi2;
	size_t i;

	for (i = 0; i < size; i++)
		out[i] = in1[i] ^ in2[i];
}
125
126/*
127 * CC-MAC function WUSB1.0[6.5]
128 *
129 * Take a data string and produce the encrypted CBC Counter-mode MIC
130 *
131 * Note the names for most function arguments are made to (more or
132 * less) match those used in the pseudo-function definition given in
133 * WUSB1.0[6.5].
134 *
135 * @tfm_cbc: CBC(AES) blkcipher handle (initialized)
136 *
137 * @tfm_aes: AES cipher handle (initialized)
138 *
139 * @mic: buffer for placing the computed MIC (Message Integrity
140 * Code). This is exactly 8 bytes, and we expect the buffer to
141 * be at least eight bytes in length.
142 *
143 * @key: 128 bit symmetric key
144 *
145 * @n: CCM nonce
146 *
147 * @a: ASCII string, 14 bytes long (I guess zero padded if needed;
148 * we use exactly 14 bytes).
149 *
150 * @b: data stream to be processed; cannot be a global or const local
151 * (will confuse the scatterlists)
152 *
153 * @blen: size of b...
154 *
155 * Still not very clear how this is done, but looks like this: we
156 * create block B0 (as WUSB1.0[6.5] says), then we AES-crypt it with
157 * @key. We bytewise xor B0 with B1 (1) and AES-crypt that. Then we
158 * take the payload and divide it in blocks (16 bytes), xor them with
159 * the previous crypto result (16 bytes) and crypt it, repeat the next
160 * block with the output of the previous one, rinse wash (I guess this
161 * is what AES CBC mode means...but I truly have no idea). So we use
162 * the CBC(AES) blkcipher, that does precisely that. The IV (Initial
163 * Vector) is 16 bytes and is set to zero, so
164 *
165 * See rfc3610. Linux crypto has a CBC implementation, but the
166 * documentation is scarce, to say the least, and the example code is
167 * so intricated that is difficult to understand how things work. Most
168 * of this is guess work -- bite me.
169 *
170 * (1) Created as 6.5 says, again, using as l(a) 'Blen + 14', and
171 * using the 14 bytes of @a to fill up
172 * b1.{mac_header,e0,security_reserved,padding}.
173 *
 * NOTE: The definition of l(a) in WUSB1.0[6.5] vs the definition of
175 * l(m) is orthogonal, they bear no relationship, so it is not
176 * in conflict with the parameter's relation that
177 * WUSB1.0[6.4.2]) defines.
178 *
179 * NOTE: WUSB1.0[A.1]: Host Nonce is missing a nibble? (1e); fixed in
180 * first errata released on 2005/07.
181 *
182 * NOTE: we need to clean IV to zero at each invocation to make sure
183 * we start with a fresh empty Initial Vector, so that the CBC
184 * works ok.
185 *
186 * NOTE: blen is not aligned to a block size, we'll pad zeros, that's
187 * what sg[4] is for. Maybe there is a smarter way to do this.
188 */
189static int wusb_ccm_mac(struct crypto_blkcipher *tfm_cbc,
190 struct crypto_cipher *tfm_aes, void *mic,
191 const struct aes_ccm_nonce *n,
192 const struct aes_ccm_label *a, const void *b,
193 size_t blen)
194{
195 int result = 0;
196 struct blkcipher_desc desc;
197 struct aes_ccm_b0 b0;
198 struct aes_ccm_b1 b1;
199 struct aes_ccm_a ax;
200 struct scatterlist sg[4], sg_dst;
201 void *iv, *dst_buf;
202 size_t ivsize, dst_size;
203 const u8 bzero[16] = { 0 };
204 size_t zero_padding;
205
206 d_fnstart(3, NULL, "(tfm_cbc %p, tfm_aes %p, mic %p, "
207 "n %p, a %p, b %p, blen %zu)\n",
208 tfm_cbc, tfm_aes, mic, n, a, b, blen);
209 /*
210 * These checks should be compile time optimized out
211 * ensure @a fills b1's mac_header and following fields
212 */
213 WARN_ON(sizeof(*a) != sizeof(b1) - sizeof(b1.la));
214 WARN_ON(sizeof(b0) != sizeof(struct aes_ccm_block));
215 WARN_ON(sizeof(b1) != sizeof(struct aes_ccm_block));
216 WARN_ON(sizeof(ax) != sizeof(struct aes_ccm_block));
217
218 result = -ENOMEM;
219 zero_padding = sizeof(struct aes_ccm_block)
220 - blen % sizeof(struct aes_ccm_block);
221 zero_padding = blen % sizeof(struct aes_ccm_block);
222 if (zero_padding)
223 zero_padding = sizeof(struct aes_ccm_block) - zero_padding;
224 dst_size = blen + sizeof(b0) + sizeof(b1) + zero_padding;
225 dst_buf = kzalloc(dst_size, GFP_KERNEL);
226 if (dst_buf == NULL) {
227 printk(KERN_ERR "E: can't alloc destination buffer\n");
228 goto error_dst_buf;
229 }
230
231 iv = crypto_blkcipher_crt(tfm_cbc)->iv;
232 ivsize = crypto_blkcipher_ivsize(tfm_cbc);
233 memset(iv, 0, ivsize);
234
235 /* Setup B0 */
236 b0.flags = 0x59; /* Format B0 */
237 b0.ccm_nonce = *n;
238 b0.lm = cpu_to_be16(0); /* WUSB1.0[6.5] sez l(m) is 0 */
239
240 /* Setup B1
241 *
242 * The WUSB spec is anything but clear! WUSB1.0[6.5]
243 * says that to initialize B1 from A with 'l(a) = blen +
244 * 14'--after clarification, it means to use A's contents
245 * for MAC Header, EO, sec reserved and padding.
246 */
247 b1.la = cpu_to_be16(blen + 14);
248 memcpy(&b1.mac_header, a, sizeof(*a));
249
250 d_printf(4, NULL, "I: B0 (%zu bytes)\n", sizeof(b0));
251 d_dump(4, NULL, &b0, sizeof(b0));
252 d_printf(4, NULL, "I: B1 (%zu bytes)\n", sizeof(b1));
253 d_dump(4, NULL, &b1, sizeof(b1));
254 d_printf(4, NULL, "I: B (%zu bytes)\n", blen);
255 d_dump(4, NULL, b, blen);
256 d_printf(4, NULL, "I: B 0-padding (%zu bytes)\n", zero_padding);
257 d_printf(4, NULL, "D: IV before crypto (%zu)\n", ivsize);
258 d_dump(4, NULL, iv, ivsize);
259
260 sg_init_table(sg, ARRAY_SIZE(sg));
261 sg_set_buf(&sg[0], &b0, sizeof(b0));
262 sg_set_buf(&sg[1], &b1, sizeof(b1));
263 sg_set_buf(&sg[2], b, blen);
264 /* 0 if well behaved :) */
265 sg_set_buf(&sg[3], bzero, zero_padding);
266 sg_init_one(&sg_dst, dst_buf, dst_size);
267
268 desc.tfm = tfm_cbc;
269 desc.flags = 0;
270 result = crypto_blkcipher_encrypt(&desc, &sg_dst, sg, dst_size);
271 if (result < 0) {
272 printk(KERN_ERR "E: can't compute CBC-MAC tag (MIC): %d\n",
273 result);
274 goto error_cbc_crypt;
275 }
276 d_printf(4, NULL, "D: MIC tag\n");
277 d_dump(4, NULL, iv, ivsize);
278
279 /* Now we crypt the MIC Tag (*iv) with Ax -- values per WUSB1.0[6.5]
280 * The procedure is to AES crypt the A0 block and XOR the MIC
281 * Tag agains it; we only do the first 8 bytes and place it
282 * directly in the destination buffer.
283 *
284 * POS Crypto API: size is assumed to be AES's block size.
285 * Thanks for documenting it -- tip taken from airo.c
286 */
287 ax.flags = 0x01; /* as per WUSB 1.0 spec */
288 ax.ccm_nonce = *n;
289 ax.counter = 0;
290 crypto_cipher_encrypt_one(tfm_aes, (void *)&ax, (void *)&ax);
291 bytewise_xor(mic, &ax, iv, 8);
292 d_printf(4, NULL, "D: CTR[MIC]\n");
293 d_dump(4, NULL, &ax, 8);
294 d_printf(4, NULL, "D: CCM-MIC tag\n");
295 d_dump(4, NULL, mic, 8);
296 result = 8;
297error_cbc_crypt:
298 kfree(dst_buf);
299error_dst_buf:
300 d_fnend(3, NULL, "(tfm_cbc %p, tfm_aes %p, mic %p, "
301 "n %p, a %p, b %p, blen %zu)\n",
302 tfm_cbc, tfm_aes, mic, n, a, b, blen);
303 return result;
304}
305
306/*
307 * WUSB Pseudo Random Function (WUSB1.0[6.5])
308 *
309 * @b: buffer to the source data; cannot be a global or const local
310 * (will confuse the scatterlists)
311 */
/*
 * Derive key material by running the CC-MAC over @b once per 64-bit
 * SFN increment, concatenating the 8-byte MICs into @out.
 *
 * Returns total bytes produced (8 per iteration) or a negative errno.
 *
 * NOTE(review): @out_size is never checked against the bytes written
 * ((len + 63) / 64 iterations x 8 bytes) -- callers must size @out
 * accordingly; verify against the call sites.
 */
ssize_t wusb_prf(void *out, size_t out_size,
		 const u8 key[16], const struct aes_ccm_nonce *_n,
		 const struct aes_ccm_label *a,
		 const void *b, size_t blen, size_t len)
{
	ssize_t result, bytes = 0, bitr;
	struct aes_ccm_nonce n = *_n;	/* local copy: sfn is mutated below */
	struct crypto_blkcipher *tfm_cbc;
	struct crypto_cipher *tfm_aes;
	u64 sfn = 0;
	__le64 sfn_le;

	d_fnstart(3, NULL, "(out %p, out_size %zu, key %p, _n %p, "
		  "a %p, b %p, blen %zu, len %zu)\n", out, out_size,
		  key, _n, a, b, blen, len);

	tfm_cbc = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm_cbc)) {
		result = PTR_ERR(tfm_cbc);
		printk(KERN_ERR "E: can't load CBC(AES): %d\n", (int)result);
		goto error_alloc_cbc;
	}
	result = crypto_blkcipher_setkey(tfm_cbc, key, 16);
	if (result < 0) {
		printk(KERN_ERR "E: can't set CBC key: %d\n", (int)result);
		goto error_setkey_cbc;
	}

	tfm_aes = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm_aes)) {
		result = PTR_ERR(tfm_aes);
		printk(KERN_ERR "E: can't load AES: %d\n", (int)result);
		goto error_alloc_aes;
	}
	result = crypto_cipher_setkey(tfm_aes, key, 16);
	if (result < 0) {
		printk(KERN_ERR "E: can't set AES key: %d\n", (int)result);
		goto error_setkey_aes;
	}

	/* one MAC pass per 64 bits of requested key material */
	for (bitr = 0; bitr < (len + 63) / 64; bitr++) {
		sfn_le = cpu_to_le64(sfn++);
		memcpy(&n.sfn, &sfn_le, sizeof(n.sfn));	/* n.sfn++... */
		result = wusb_ccm_mac(tfm_cbc, tfm_aes, out + bytes,
				      &n, a, b, blen);
		if (result < 0)
			goto error_ccm_mac;
		bytes += result;
	}
	result = bytes;
error_ccm_mac:
error_setkey_aes:
	crypto_free_cipher(tfm_aes);
error_alloc_aes:
error_setkey_cbc:
	crypto_free_blkcipher(tfm_cbc);
error_alloc_cbc:
	d_fnend(3, NULL, "(out %p, out_size %zu, key %p, _n %p, "
		"a %p, b %p, blen %zu, len %zu) = %d\n", out, out_size,
		key, _n, a, b, blen, len, (int)bytes);
	return result;
}
374
/* WUSB1.0[A.2] test vectors: key and CCM nonce for the OOB MIC check */
static const u8 stv_hsmic_key[16] = {
	0x4b, 0x79, 0xa3, 0xcf, 0xe5, 0x53, 0x23, 0x9d,
	0xd7, 0xc1, 0x6d, 0x1c, 0x2d, 0xab, 0x6d, 0x3f
};

static const struct aes_ccm_nonce stv_hsmic_n = {
	.sfn = { 0 },
	.tkid = { 0x76, 0x98, 0x01, },
	.dest_addr = { .data = { 0xbe, 0x00 } },
	.src_addr = { .data = { 0x76, 0x98 } },
};
387
388/*
389 * Out-of-band MIC Generation verification code
390 *
391 */
/*
 * Compute the out-of-band MIC over the WUSB1.0[A.2] handshake vector
 * and compare against the published result.
 *
 * Returns 0 on match, negative errno on compute failure or mismatch
 * (dumping all inputs and outputs for diagnosis).
 */
static int wusb_oob_mic_verify(void)
{
	int result;
	u8 mic[8];
	/* WUSB1.0[A.2] test vectors
	 *
	 * Need to keep it in the local stack as GCC 4.1.3something
	 * messes up and generates noise.
	 */
	struct usb_handshake stv_hsmic_hs = {
		.bMessageNumber = 2,
		.bStatus = 00,
		.tTKID = { 0x76, 0x98, 0x01 },
		.bReserved = 00,
		.CDID = { 0x30, 0x31, 0x32, 0x33, 0x34, 0x35,
			  0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b,
			  0x3c, 0x3d, 0x3e, 0x3f },
		.nonce = { 0x20, 0x21, 0x22, 0x23, 0x24, 0x25,
			   0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b,
			   0x2c, 0x2d, 0x2e, 0x2f },
		.MIC = { 0x75, 0x6a, 0x97, 0x51, 0x0c, 0x8c,
			 0x14, 0x7b } ,
	};
	size_t hs_size;

	result = wusb_oob_mic(mic, stv_hsmic_key, &stv_hsmic_n, &stv_hsmic_hs);
	if (result < 0)
		printk(KERN_ERR "E: WUSB OOB MIC test: failed: %d\n", result);
	else if (memcmp(stv_hsmic_hs.MIC, mic, sizeof(mic))) {
		printk(KERN_ERR "E: OOB MIC test: "
		       "mismatch between MIC result and WUSB1.0[A2]\n");
		/* the MIC covers the handshake minus its own MIC field */
		hs_size = sizeof(stv_hsmic_hs) - sizeof(stv_hsmic_hs.MIC);
		printk(KERN_ERR "E: Handshake2 in: (%zu bytes)\n", hs_size);
		dump_bytes(NULL, &stv_hsmic_hs, hs_size);
		printk(KERN_ERR "E: CCM Nonce in: (%zu bytes)\n",
		       sizeof(stv_hsmic_n));
		dump_bytes(NULL, &stv_hsmic_n, sizeof(stv_hsmic_n));
		printk(KERN_ERR "E: MIC out:\n");
		dump_bytes(NULL, mic, sizeof(mic));
		printk(KERN_ERR "E: MIC out (from WUSB1.0[A.2]):\n");
		dump_bytes(NULL, stv_hsmic_hs.MIC, sizeof(stv_hsmic_hs.MIC));
		result = -EINVAL;
	} else
		result = 0;
	return result;
}
438
439/*
440 * Test vectors for Key derivation
441 *
442 * These come from WUSB1.0[6.5.1], the vectors in WUSB1.0[A.1]
443 * (errata corrected in 2005/07).
444 */
/* WUSB1.0[A.1] key-derivation vectors: input key, nonce and expected
 * KCK/PTK output (errata corrected 2005/07) */
static const u8 stv_key_a1[16] __attribute__ ((__aligned__(4))) = {
	0xf0, 0xe1, 0xd2, 0xc3, 0xb4, 0xa5, 0x96, 0x87,
	0x78, 0x69, 0x5a, 0x4b, 0x3c, 0x2d, 0x1e, 0x0f
};

static const struct aes_ccm_nonce stv_keydvt_n_a1 = {
	.sfn = { 0 },
	.tkid = { 0x76, 0x98, 0x01, },
	.dest_addr = { .data = { 0xbe, 0x00 } },
	.src_addr = { .data = { 0x76, 0x98 } },
};

static const struct wusb_keydvt_out stv_keydvt_out_a1 = {
	.kck = {
		0x4b, 0x79, 0xa3, 0xcf, 0xe5, 0x53, 0x23, 0x9d,
		0xd7, 0xc1, 0x6d, 0x1c, 0x2d, 0xab, 0x6d, 0x3f
	},
	.ptk = {
		0xc8, 0x70, 0x62, 0x82, 0xb6, 0x7c, 0xe9, 0x06,
		0x7b, 0xc5, 0x25, 0x69, 0xf2, 0x36, 0x61, 0x2d
	}
};
467
468/*
 * Perform a test to make sure we match the vectors defined in
470 * WUSB1.0[A.1](Errata2006/12)
471 */
472static int wusb_key_derive_verify(void)
473{
474 int result = 0;
475 struct wusb_keydvt_out keydvt_out;
476 /* These come from WUSB1.0[A.1] + 2006/12 errata
477 * NOTE: can't make this const or global -- somehow it seems
478 * the scatterlists for crypto get confused and we get
479 * bad data. There is no doc on this... */
480 struct wusb_keydvt_in stv_keydvt_in_a1 = {
481 .hnonce = {
482 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
483 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f
484 },
485 .dnonce = {
486 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
487 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f
488 }
489 };
490
491 result = wusb_key_derive(&keydvt_out, stv_key_a1, &stv_keydvt_n_a1,
492 &stv_keydvt_in_a1);
493 if (result < 0)
494 printk(KERN_ERR "E: WUSB key derivation test: "
495 "derivation failed: %d\n", result);
496 if (memcmp(&stv_keydvt_out_a1, &keydvt_out, sizeof(keydvt_out))) {
497 printk(KERN_ERR "E: WUSB key derivation test: "
498 "mismatch between key derivation result "
499 "and WUSB1.0[A1] Errata 2006/12\n");
500 printk(KERN_ERR "E: keydvt in: key (%zu bytes)\n",
501 sizeof(stv_key_a1));
502 dump_bytes(NULL, stv_key_a1, sizeof(stv_key_a1));
503 printk(KERN_ERR "E: keydvt in: nonce (%zu bytes)\n",
504 sizeof(stv_keydvt_n_a1));
505 dump_bytes(NULL, &stv_keydvt_n_a1, sizeof(stv_keydvt_n_a1));
506 printk(KERN_ERR "E: keydvt in: hnonce & dnonce (%zu bytes)\n",
507 sizeof(stv_keydvt_in_a1));
508 dump_bytes(NULL, &stv_keydvt_in_a1, sizeof(stv_keydvt_in_a1));
509 printk(KERN_ERR "E: keydvt out: KCK\n");
510 dump_bytes(NULL, &keydvt_out.kck, sizeof(keydvt_out.kck));
511 printk(KERN_ERR "E: keydvt out: PTK\n");
512 dump_bytes(NULL, &keydvt_out.ptk, sizeof(keydvt_out.ptk));
513 result = -EINVAL;
514 } else
515 result = 0;
516 return result;
517}
518
519/*
520 * Initialize crypto system
521 *
522 * FIXME: we do nothing now, other than verifying. Later on we'll
523 * cache the encryption stuff, so that's why we have a separate init.
524 */
int wusb_crypto_init(void)
{
	int result = wusb_key_derive_verify();

	if (result < 0)
		return result;
	return wusb_oob_mic_verify();
}
534
/* Tear down the crypto subsystem; currently stateless, nothing to free. */
void wusb_crypto_exit(void)
{
	/* FIXME: free cached crypto transforms */
}
diff --git a/drivers/usb/wusbcore/dev-sysfs.c b/drivers/usb/wusbcore/dev-sysfs.c
new file mode 100644
index 000000000000..7897a19652e5
--- /dev/null
+++ b/drivers/usb/wusbcore/dev-sysfs.c
@@ -0,0 +1,143 @@
1/*
2 * WUSB devices
3 * sysfs bindings
4 *
5 * Copyright (C) 2007 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * Get them out of the way...
24 */
25
26#include <linux/jiffies.h>
27#include <linux/ctype.h>
28#include <linux/workqueue.h>
29#include "wusbhc.h"
30
31#undef D_LOCAL
32#define D_LOCAL 4
33#include <linux/uwb/debug.h>
34
35static ssize_t wusb_disconnect_store(struct device *dev,
36 struct device_attribute *attr,
37 const char *buf, size_t size)
38{
39 struct usb_device *usb_dev;
40 struct wusbhc *wusbhc;
41 unsigned command;
42 u8 port_idx;
43
44 if (sscanf(buf, "%u", &command) != 1)
45 return -EINVAL;
46 if (command == 0)
47 return size;
48 usb_dev = to_usb_device(dev);
49 wusbhc = wusbhc_get_by_usb_dev(usb_dev);
50 if (wusbhc == NULL)
51 return -ENODEV;
52
53 mutex_lock(&wusbhc->mutex);
54 port_idx = wusb_port_no_to_idx(usb_dev->portnum);
55 __wusbhc_dev_disable(wusbhc, port_idx);
56 mutex_unlock(&wusbhc->mutex);
57 wusbhc_put(wusbhc);
58 return size;
59}
60static DEVICE_ATTR(wusb_disconnect, 0200, NULL, wusb_disconnect_store);
61
/*
 * Show the WUSB device's CDID as hex followed by a newline.
 *
 * ckhdid_printf() presumably returns the number of characters it
 * wrote (TODO confirm); the strcat'ed "\n" accounts for the "+ 1"
 * in the return value.
 */
static ssize_t wusb_cdid_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	ssize_t result;
	struct wusb_dev *wusb_dev;

	wusb_dev = wusb_dev_get_by_usb_dev(to_usb_device(dev));
	if (wusb_dev == NULL)
		return -ENODEV;
	result = ckhdid_printf(buf, PAGE_SIZE, &wusb_dev->cdid);
	strcat(buf, "\n");
	wusb_dev_put(wusb_dev);
	return result + 1;
}
76static DEVICE_ATTR(wusb_cdid, 0444, wusb_cdid_show, NULL);
77
/*
 * Accept a 16-octet Connection Key and run the 4-way handshake with
 * the device.
 *
 * The stack copy of the key is zeroed after use.  NOTE(review): a
 * plain memset of a dying stack object can be elided by the compiler;
 * consider memzero_explicit -- verify.
 */
static ssize_t wusb_ck_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t size)
{
	int result;
	struct usb_device *usb_dev;
	struct wusbhc *wusbhc;
	struct wusb_ckhdid ck;

	result = sscanf(buf,
			"%02hhx %02hhx %02hhx %02hhx "
			"%02hhx %02hhx %02hhx %02hhx "
			"%02hhx %02hhx %02hhx %02hhx "
			"%02hhx %02hhx %02hhx %02hhx\n",
			&ck.data[0] , &ck.data[1],
			&ck.data[2] , &ck.data[3],
			&ck.data[4] , &ck.data[5],
			&ck.data[6] , &ck.data[7],
			&ck.data[8] , &ck.data[9],
			&ck.data[10], &ck.data[11],
			&ck.data[12], &ck.data[13],
			&ck.data[14], &ck.data[15]);
	if (result != 16)
		return -EINVAL;

	usb_dev = to_usb_device(dev);
	wusbhc = wusbhc_get_by_usb_dev(usb_dev);
	if (wusbhc == NULL)
		return -ENODEV;
	result = wusb_dev_4way_handshake(wusbhc, usb_dev->wusb_dev, &ck);
	memset(&ck, 0, sizeof(ck));
	wusbhc_put(wusbhc);
	return result < 0 ? result : size;
}
112static DEVICE_ATTR(wusb_ck, 0200, NULL, wusb_ck_store);
113
/* sysfs attributes attached to each authenticated WUSB device */
static struct attribute *wusb_dev_attrs[] = {
	&dev_attr_wusb_disconnect.attr,
	&dev_attr_wusb_cdid.attr,
	&dev_attr_wusb_ck.attr,
	NULL,
};

static struct attribute_group wusb_dev_attr_group = {
	.name = NULL,	/* we want them in the same directory */
	.attrs = wusb_dev_attrs,
};
125
126int wusb_dev_sysfs_add(struct wusbhc *wusbhc, struct usb_device *usb_dev,
127 struct wusb_dev *wusb_dev)
128{
129 int result = sysfs_create_group(&usb_dev->dev.kobj,
130 &wusb_dev_attr_group);
131 struct device *dev = &usb_dev->dev;
132 if (result < 0)
133 dev_err(dev, "Cannot register WUSB-dev attributes: %d\n",
134 result);
135 return result;
136}
137
138void wusb_dev_sysfs_rm(struct wusb_dev *wusb_dev)
139{
140 struct usb_device *usb_dev = wusb_dev->usb_dev;
141 if (usb_dev)
142 sysfs_remove_group(&usb_dev->dev.kobj, &wusb_dev_attr_group);
143}
diff --git a/drivers/usb/wusbcore/devconnect.c b/drivers/usb/wusbcore/devconnect.c
new file mode 100644
index 000000000000..f45d777bef34
--- /dev/null
+++ b/drivers/usb/wusbcore/devconnect.c
@@ -0,0 +1,1297 @@
1/*
2 * WUSB Wire Adapter: Control/Data Streaming Interface (WUSB[8])
3 * Device Connect handling
4 *
5 * Copyright (C) 2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * FIXME: docs
24 * FIXME: this file needs to be broken up, it's grown too big
25 *
26 *
27 * WUSB1.0[7.1, 7.5.1, ]
28 *
29 * WUSB device connection is kind of messy. Some background:
30 *
31 * When a device wants to connect it scans the UWB radio channels
32 * looking for a WUSB Channel; a WUSB channel is defined by MMCs
33 * (Micro Managed Commands or something like that) [see
34 * Design-overview for more on this] .
35 *
36 * So, device scans the radio, finds MMCs and thus a host and checks
37 * when the next DNTS is. It sends a Device Notification Connect
38 * (DN_Connect); the host picks it up (through nep.c and notif.c, ends
39 * up in wusb_devconnect_ack(), which creates a wusb_dev structure in
40 * wusbhc->port[port_number].wusb_dev), assigns an unauth address
41 * to the device (this means from 0x80 to 0xfe) and sends, in the MMC
42 * a Connect Ack Information Element (ConnAck IE).
43 *
44 * So now the device now has a WUSB address. From now on, we use
45 * that to talk to it in the RPipes.
46 *
47 * ASSUMPTIONS:
48 *
 * - We use as the device address the port number where it is
50 * connected (port 0 doesn't exist). For unauth, it is 128 + that.
51 *
52 * ROADMAP:
53 *
54 * This file contains the logic for doing that--entry points:
55 *
56 * wusb_devconnect_ack() Ack a device until _acked() called.
57 * Called by notif.c:wusb_handle_dn_connect()
58 * when a DN_Connect is received.
59 *
60 * wusbhc_devconnect_auth() Called by rh.c:wusbhc_rh_port_reset() when
61 * doing the device connect sequence.
62 *
63 * wusb_devconnect_acked() Ack done, release resources.
64 *
65 * wusb_handle_dn_alive() Called by notif.c:wusb_handle_dn()
66 * for processing a DN_Alive pong from a device.
67 *
68 * wusb_handle_dn_disconnect()Called by notif.c:wusb_handle_dn() to
 *                              process a disconnect request from a
70 * device.
71 *
72 * wusb_dev_reset() Called by rh.c:wusbhc_rh_port_reset() when
73 * resetting a device.
74 *
75 * __wusb_dev_disable() Called by rh.c:wusbhc_rh_clear_port_feat() when
76 * disabling a port.
77 *
78 * wusb_devconnect_create() Called when creating the host by
79 * lc.c:wusbhc_create().
80 *
81 * wusb_devconnect_destroy() Cleanup called removing the host. Called
82 * by lc.c:wusbhc_destroy().
83 *
84 * Each Wireless USB host maintains a list of DN_Connect requests
85 * (actually we maintain a list of pending Connect Acks, the
86 * wusbhc->ca_list).
87 *
88 * LIFE CYCLE OF port->wusb_dev
89 *
90 * Before the @wusbhc structure put()s the reference it owns for
91 * port->wusb_dev [and clean the wusb_dev pointer], it needs to
92 * lock @wusbhc->mutex.
93 */
94
95#include <linux/jiffies.h>
96#include <linux/ctype.h>
97#include <linux/workqueue.h>
98#include "wusbhc.h"
99
#undef D_LOCAL
#define D_LOCAL 1	/* NOTE(review): debug verbosity left at 1; set to 0 for release builds? */
#include <linux/uwb/debug.h>
103
104static void wusbhc_devconnect_acked_work(struct work_struct *work);
105
106static void wusb_dev_free(struct wusb_dev *wusb_dev)
107{
108 if (wusb_dev) {
109 kfree(wusb_dev->set_gtk_req);
110 usb_free_urb(wusb_dev->set_gtk_urb);
111 kfree(wusb_dev);
112 }
113}
114
115static struct wusb_dev *wusb_dev_alloc(struct wusbhc *wusbhc)
116{
117 struct wusb_dev *wusb_dev;
118 struct urb *urb;
119 struct usb_ctrlrequest *req;
120
121 wusb_dev = kzalloc(sizeof(*wusb_dev), GFP_KERNEL);
122 if (wusb_dev == NULL)
123 goto err;
124
125 wusb_dev->wusbhc = wusbhc;
126
127 INIT_WORK(&wusb_dev->devconnect_acked_work, wusbhc_devconnect_acked_work);
128
129 urb = usb_alloc_urb(0, GFP_KERNEL);
130 if (urb == NULL)
131 goto err;
132
133 req = kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL);
134 if (req == NULL)
135 goto err;
136
137 req->bRequestType = USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE;
138 req->bRequest = USB_REQ_SET_DESCRIPTOR;
139 req->wValue = cpu_to_le16(USB_DT_KEY << 8 | wusbhc->gtk_index);
140 req->wIndex = 0;
141 req->wLength = cpu_to_le16(wusbhc->gtk.descr.bLength);
142
143 wusb_dev->set_gtk_urb = urb;
144 wusb_dev->set_gtk_req = req;
145
146 return wusb_dev;
147err:
148 wusb_dev_free(wusb_dev);
149 return NULL;
150}
151
152
153/*
154 * Using the Connect-Ack list, fill out the @wusbhc Connect-Ack WUSB IE
155 * properly so that it can be added to the MMC.
156 *
157 * We just get the @wusbhc->ca_list and fill out the first four ones or
158 * less (per-spec WUSB1.0[7.5, before T7-38). If the ConnectAck WUSB
159 * IE is not allocated, we alloc it.
160 *
161 * @wusbhc->mutex must be taken
162 */
163static void wusbhc_fill_cack_ie(struct wusbhc *wusbhc)
164{
165 unsigned cnt;
166 struct wusb_dev *dev_itr;
167 struct wuie_connect_ack *cack_ie;
168
169 cack_ie = &wusbhc->cack_ie;
170 cnt = 0;
171 list_for_each_entry(dev_itr, &wusbhc->cack_list, cack_node) {
172 cack_ie->blk[cnt].CDID = dev_itr->cdid;
173 cack_ie->blk[cnt].bDeviceAddress = dev_itr->addr;
174 if (++cnt >= WUIE_ELT_MAX)
175 break;
176 }
177 cack_ie->hdr.bLength = sizeof(cack_ie->hdr)
178 + cnt * sizeof(cack_ie->blk[0]);
179}
180
181/*
182 * Register a new device that wants to connect
183 *
184 * A new device wants to connect, so we add it to the Connect-Ack
185 * list. We give it an address in the unauthorized range (bit 8 set);
186 * user space will have to drive authorization further on.
187 *
188 * @dev_addr: address to use for the device (which is also the port
189 * number).
190 *
191 * @wusbhc->mutex must be taken
192 */
193static struct wusb_dev *wusbhc_cack_add(struct wusbhc *wusbhc,
194 struct wusb_dn_connect *dnc,
195 const char *pr_cdid, u8 port_idx)
196{
197 struct device *dev = wusbhc->dev;
198 struct wusb_dev *wusb_dev;
199 int new_connection = wusb_dn_connect_new_connection(dnc);
200 u8 dev_addr;
201 int result;
202
203 /* Is it registered already? */
204 list_for_each_entry(wusb_dev, &wusbhc->cack_list, cack_node)
205 if (!memcmp(&wusb_dev->cdid, &dnc->CDID,
206 sizeof(wusb_dev->cdid)))
207 return wusb_dev;
208 /* We don't have it, create an entry, register it */
209 wusb_dev = wusb_dev_alloc(wusbhc);
210 if (wusb_dev == NULL)
211 return NULL;
212 wusb_dev_init(wusb_dev);
213 wusb_dev->cdid = dnc->CDID;
214 wusb_dev->port_idx = port_idx;
215
216 /*
217 * Devices are always available within the cluster reservation
218 * and since the hardware will take the intersection of the
219 * per-device availability and the cluster reservation, the
220 * per-device availability can simply be set to always
221 * available.
222 */
223 bitmap_fill(wusb_dev->availability.bm, UWB_NUM_MAS);
224
225 /* FIXME: handle reconnects instead of assuming connects are
226 always new. */
227 if (1 && new_connection == 0)
228 new_connection = 1;
229 if (new_connection) {
230 dev_addr = (port_idx + 2) | WUSB_DEV_ADDR_UNAUTH;
231
232 dev_info(dev, "Connecting new WUSB device to address %u, "
233 "port %u\n", dev_addr, port_idx);
234
235 result = wusb_set_dev_addr(wusbhc, wusb_dev, dev_addr);
236 if (result < 0)
237 return NULL;
238 }
239 wusb_dev->entry_ts = jiffies;
240 list_add_tail(&wusb_dev->cack_node, &wusbhc->cack_list);
241 wusbhc->cack_count++;
242 wusbhc_fill_cack_ie(wusbhc);
243 return wusb_dev;
244}
245
246/*
247 * Remove a Connect-Ack context entry from the HCs view
248 *
249 * @wusbhc->mutex must be taken
250 */
static void wusbhc_cack_rm(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
{
	struct device *dev = wusbhc->dev;
	d_fnstart(3, dev, "(wusbhc %p wusb_dev %p)\n", wusbhc, wusb_dev);
	/* Unlink from the pending Connect-Ack list; del_init so a later
	 * list_empty() check on cack_node stays valid. */
	list_del_init(&wusb_dev->cack_node);
	wusbhc->cack_count--;
	/* Regenerate the ConnectAck IE so it no longer names this device. */
	wusbhc_fill_cack_ie(wusbhc);
	d_fnend(3, dev, "(wusbhc %p wusb_dev %p) = void\n", wusbhc, wusb_dev);
}
260
261/*
262 * @wusbhc->mutex must be taken */
263static
264void wusbhc_devconnect_acked(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
265{
266 struct device *dev = wusbhc->dev;
267 d_fnstart(3, dev, "(wusbhc %p wusb_dev %p)\n", wusbhc, wusb_dev);
268 wusbhc_cack_rm(wusbhc, wusb_dev);
269 if (wusbhc->cack_count)
270 wusbhc_mmcie_set(wusbhc, 0, 0, &wusbhc->cack_ie.hdr);
271 else
272 wusbhc_mmcie_rm(wusbhc, &wusbhc->cack_ie.hdr);
273 d_fnend(3, dev, "(wusbhc %p wusb_dev %p) = void\n", wusbhc, wusb_dev);
274}
275
/* Workqueue wrapper: take the HC mutex and complete the connect-ack. */
static void wusbhc_devconnect_acked_work(struct work_struct *work)
{
	struct wusb_dev *wusb_dev = container_of(work, struct wusb_dev,
						 devconnect_acked_work);
	struct wusbhc *wusbhc = wusb_dev->wusbhc;

	mutex_lock(&wusbhc->mutex);
	wusbhc_devconnect_acked(wusbhc, wusb_dev);
	mutex_unlock(&wusbhc->mutex);
}
286
287/*
288 * Ack a device for connection
289 *
290 * FIXME: docs
291 *
292 * @pr_cdid: Printable CDID...hex Use @dnc->cdid for the real deal.
293 *
294 * So we get the connect ack IE (may have been allocated already),
295 * find an empty connect block, an empty virtual port, create an
296 * address with it (see below), make it an unauth addr [bit 7 set] and
297 * set the MMC.
298 *
299 * Addresses: because WUSB hosts have no downstream hubs, we can do a
300 * 1:1 mapping between 'port number' and device
301 * address. This simplifies many things, as during this
 *             initial connect phase the USB stack has no knowledge of
 *             the device and hasn't assigned an address yet--we know
 *             USB's choose_address() will use the same heuristics we
305 * use here, so we can assume which address will be assigned.
306 *
307 * USB stack always assigns address 1 to the root hub, so
308 * to the port number we add 2 (thus virtual port #0 is
309 * addr #2).
310 *
311 * @wusbhc shall be referenced
312 */
313static
314void wusbhc_devconnect_ack(struct wusbhc *wusbhc, struct wusb_dn_connect *dnc,
315 const char *pr_cdid)
316{
317 int result;
318 struct device *dev = wusbhc->dev;
319 struct wusb_dev *wusb_dev;
320 struct wusb_port *port;
321 unsigned idx, devnum;
322
323 d_fnstart(3, dev, "(%p, %p, %s)\n", wusbhc, dnc, pr_cdid);
324 mutex_lock(&wusbhc->mutex);
325
326 /* Check we are not handling it already */
327 for (idx = 0; idx < wusbhc->ports_max; idx++) {
328 port = wusb_port_by_idx(wusbhc, idx);
329 if (port->wusb_dev
330 && memcmp(&dnc->CDID, &port->wusb_dev->cdid, sizeof(dnc->CDID)) == 0)
331 goto error_unlock;
332 }
333 /* Look up those fake ports we have for a free one */
334 for (idx = 0; idx < wusbhc->ports_max; idx++) {
335 port = wusb_port_by_idx(wusbhc, idx);
336 if ((port->status & USB_PORT_STAT_POWER)
337 && !(port->status & USB_PORT_STAT_CONNECTION))
338 break;
339 }
340 if (idx >= wusbhc->ports_max) {
341 dev_err(dev, "Host controller can't connect more devices "
342 "(%u already connected); device %s rejected\n",
343 wusbhc->ports_max, pr_cdid);
344 /* NOTE: we could send a WUIE_Disconnect here, but we haven't
345 * event acked, so the device will eventually timeout the
346 * connection, right? */
347 goto error_unlock;
348 }
349
350 devnum = idx + 2;
351
352 /* Make sure we are using no crypto on that "virtual port" */
353 wusbhc->set_ptk(wusbhc, idx, 0, NULL, 0);
354
355 /* Grab a filled in Connect-Ack context, fill out the
356 * Connect-Ack Wireless USB IE, set the MMC */
357 wusb_dev = wusbhc_cack_add(wusbhc, dnc, pr_cdid, idx);
358 if (wusb_dev == NULL)
359 goto error_unlock;
360 result = wusbhc_mmcie_set(wusbhc, 0, 0, &wusbhc->cack_ie.hdr);
361 if (result < 0)
362 goto error_unlock;
363 /* Give the device at least 2ms (WUSB1.0[7.5.1p3]), let's do
364 * three for a good measure */
365 msleep(3);
366 port->wusb_dev = wusb_dev;
367 port->status |= USB_PORT_STAT_CONNECTION;
368 port->change |= USB_PORT_STAT_C_CONNECTION;
369 port->reset_count = 0;
370 /* Now the port status changed to connected; khubd will
371 * pick the change up and try to reset the port to bring it to
372 * the enabled state--so this process returns up to the stack
373 * and it calls back into wusbhc_rh_port_reset() who will call
374 * devconnect_auth().
375 */
376error_unlock:
377 mutex_unlock(&wusbhc->mutex);
378 d_fnend(3, dev, "(%p, %p, %s) = void\n", wusbhc, dnc, pr_cdid);
379 return;
380
381}
382
383/*
384 * Disconnect a Wireless USB device from its fake port
385 *
386 * Marks the port as disconnected so that khubd can pick up the change
387 * and drops our knowledge about the device.
388 *
389 * Assumes there is a device connected
390 *
391 * @port_index: zero based port number
392 *
393 * NOTE: @wusbhc->mutex is locked
394 *
395 * WARNING: From here it is not very safe to access anything hanging off
396 * wusb_dev
397 */
static void __wusbhc_dev_disconnect(struct wusbhc *wusbhc,
				    struct wusb_port *port)
{
	struct device *dev = wusbhc->dev;
	struct wusb_dev *wusb_dev = port->wusb_dev;

	d_fnstart(3, dev, "(wusbhc %p, port %p)\n", wusbhc, port);
	/* Clear every "live" status bit so khubd sees a plain, empty,
	 * powered port; flag connection+enable changes so it rescans. */
	port->status &= ~(USB_PORT_STAT_CONNECTION | USB_PORT_STAT_ENABLE
			  | USB_PORT_STAT_SUSPEND | USB_PORT_STAT_RESET
			  | USB_PORT_STAT_LOW_SPEED | USB_PORT_STAT_HIGH_SPEED);
	port->change |= USB_PORT_STAT_C_CONNECTION | USB_PORT_STAT_C_ENABLE;
	if (wusb_dev) {
		/* Still waiting for its connect-ack? Unhook it. */
		if (!list_empty(&wusb_dev->cack_node))
			list_del_init(&wusb_dev->cack_node);
		/* For the one in cack_add() */
		wusb_dev_put(wusb_dev);
	}
	port->wusb_dev = NULL;
	/* don't reset the reset_count to zero or wusbhc_rh_port_reset will get
	 * confused! We only reset to zero when we connect a new device.
	 */

	/* After a device disconnects, change the GTK (see [WUSB]
	 * section 6.2.11.2). */
	wusbhc_gtk_rekey(wusbhc);

	d_fnend(3, dev, "(wusbhc %p, port %p) = void\n", wusbhc, port);
	/* The Wireless USB part has forgotten about the device already; now
	 * khubd's timer will pick up the disconnection and remove the USB
	 * device from the system
	 */
}
430
431/*
432 * Authenticate a device into the WUSB Cluster
433 *
434 * Called from the Root Hub code (rh.c:wusbhc_rh_port_reset()) when
435 * asking for a reset on a port that is not enabled (ie: first connect
436 * on the port).
437 *
 * Performs the 4way handshake to allow the device to communicate w/ the
439 * WUSB Cluster securely; once done, issue a request to the device for
440 * it to change to address 0.
441 *
442 * This mimics the reset step of Wired USB that once resetting a
443 * device, leaves the port in enabled state and the dev with the
444 * default address (0).
445 *
446 * WUSB1.0[7.1.2]
447 *
448 * @port_idx: port where the change happened--This is the index into
449 * the wusbhc port array, not the USB port number.
450 */
int wusbhc_devconnect_auth(struct wusbhc *wusbhc, u8 port_idx)
{
	struct device *dev = wusbhc->dev;
	struct wusb_port *port = wusb_port_by_idx(wusbhc, port_idx);

	d_fnstart(3, dev, "(%p, %u)\n", wusbhc, port_idx);
	/* Finish the "reset": mark the fake port enabled and signal both
	 * the reset-done and enable changes so khubd proceeds with
	 * enumeration.  Always succeeds (returns 0). */
	port->status &= ~USB_PORT_STAT_RESET;
	port->status |= USB_PORT_STAT_ENABLE;
	port->change |= USB_PORT_STAT_C_RESET | USB_PORT_STAT_C_ENABLE;
	d_fnend(3, dev, "(%p, %u) = 0\n", wusbhc, port_idx);
	return 0;
}
463
464/*
465 * Refresh the list of keep alives to emit in the MMC
466 *
467 * Some devices don't respond to keep alives unless they've been
468 * authenticated, so skip unauthenticated devices.
469 *
470 * We only publish the first four devices that have a coming timeout
471 * condition. Then when we are done processing those, we go for the
472 * next ones. We ignore the ones that have timed out already (they'll
473 * be purged).
474 *
475 * This might cause the first devices to timeout the last devices in
476 * the port array...FIXME: come up with a better algorithm?
477 *
478 * Note we can't do much about MMC's ops errors; we hope next refresh
479 * will kind of handle it.
480 *
481 * NOTE: @wusbhc->mutex is locked
482 */
483static void __wusbhc_keep_alive(struct wusbhc *wusbhc)
484{
485 struct device *dev = wusbhc->dev;
486 unsigned cnt;
487 struct wusb_dev *wusb_dev;
488 struct wusb_port *wusb_port;
489 struct wuie_keep_alive *ie = &wusbhc->keep_alive_ie;
490 unsigned keep_alives, old_keep_alives;
491
492 old_keep_alives = ie->hdr.bLength - sizeof(ie->hdr);
493 keep_alives = 0;
494 for (cnt = 0;
495 keep_alives <= WUIE_ELT_MAX && cnt < wusbhc->ports_max;
496 cnt++) {
497 unsigned tt = msecs_to_jiffies(wusbhc->trust_timeout);
498
499 wusb_port = wusb_port_by_idx(wusbhc, cnt);
500 wusb_dev = wusb_port->wusb_dev;
501
502 if (wusb_dev == NULL)
503 continue;
504 if (wusb_dev->usb_dev == NULL || !wusb_dev->usb_dev->authenticated)
505 continue;
506
507 if (time_after(jiffies, wusb_dev->entry_ts + tt)) {
508 dev_err(dev, "KEEPALIVE: device %u timed out\n",
509 wusb_dev->addr);
510 __wusbhc_dev_disconnect(wusbhc, wusb_port);
511 } else if (time_after(jiffies, wusb_dev->entry_ts + tt/2)) {
512 /* Approaching timeout cut out, need to refresh */
513 ie->bDeviceAddress[keep_alives++] = wusb_dev->addr;
514 }
515 }
516 if (keep_alives & 0x1) /* pad to even number ([WUSB] section 7.5.9) */
517 ie->bDeviceAddress[keep_alives++] = 0x7f;
518 ie->hdr.bLength = sizeof(ie->hdr) +
519 keep_alives*sizeof(ie->bDeviceAddress[0]);
520 if (keep_alives > 0)
521 wusbhc_mmcie_set(wusbhc, 10, 5, &ie->hdr);
522 else if (old_keep_alives != 0)
523 wusbhc_mmcie_rm(wusbhc, &ie->hdr);
524}
525
526/*
527 * Do a run through all devices checking for timeouts
528 */
529static void wusbhc_keep_alive_run(struct work_struct *ws)
530{
531 struct delayed_work *dw =
532 container_of(ws, struct delayed_work, work);
533 struct wusbhc *wusbhc =
534 container_of(dw, struct wusbhc, keep_alive_timer);
535
536 d_fnstart(5, wusbhc->dev, "(wusbhc %p)\n", wusbhc);
537 if (wusbhc->active) {
538 mutex_lock(&wusbhc->mutex);
539 __wusbhc_keep_alive(wusbhc);
540 mutex_unlock(&wusbhc->mutex);
541 queue_delayed_work(wusbd, &wusbhc->keep_alive_timer,
542 (wusbhc->trust_timeout * CONFIG_HZ)/1000/2);
543 }
544 d_fnend(5, wusbhc->dev, "(wusbhc %p) = void\n", wusbhc);
545 return;
546}
547
548/*
549 * Find the wusb_dev from its device address.
550 *
551 * The device can be found directly from the address (see
552 * wusb_cack_add() for where the device address is set to port_idx
553 * +2), except when the address is zero.
554 */
555static struct wusb_dev *wusbhc_find_dev_by_addr(struct wusbhc *wusbhc, u8 addr)
556{
557 int p;
558
559 if (addr == 0xff) /* unconnected */
560 return NULL;
561
562 if (addr > 0) {
563 int port = (addr & ~0x80) - 2;
564 if (port < 0 || port >= wusbhc->ports_max)
565 return NULL;
566 return wusb_port_by_idx(wusbhc, port)->wusb_dev;
567 }
568
569 /* Look for the device with address 0. */
570 for (p = 0; p < wusbhc->ports_max; p++) {
571 struct wusb_dev *wusb_dev = wusb_port_by_idx(wusbhc, p)->wusb_dev;
572 if (wusb_dev && wusb_dev->addr == addr)
573 return wusb_dev;
574 }
575 return NULL;
576}
577
578/*
579 * Handle a DN_Alive notification (WUSB1.0[7.6.1])
580 *
581 * This just updates the device activity timestamp and then refreshes
582 * the keep alive IE.
583 *
584 * @wusbhc shall be referenced and unlocked
585 */
static void wusbhc_handle_dn_alive(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
{
	struct device *dev = wusbhc->dev;

	d_printf(2, dev, "DN ALIVE: device 0x%02x pong\n", wusb_dev->addr);

	mutex_lock(&wusbhc->mutex);
	/* Stamp the device as recently-seen, then rebuild the keep-alive
	 * IE so this device drops out of the "needs a ping" set. */
	wusb_dev->entry_ts = jiffies;
	__wusbhc_keep_alive(wusbhc);
	mutex_unlock(&wusbhc->mutex);
}
597
598/*
599 * Handle a DN_Connect notification (WUSB1.0[7.6.1])
600 *
601 * @wusbhc
602 * @pkt_hdr
603 * @size: Size of the buffer where the notification resides; if the
604 * notification data suggests there should be more data than
605 * available, an error will be signaled and the whole buffer
606 * consumed.
607 *
608 * @wusbhc->mutex shall be held
609 */
static void wusbhc_handle_dn_connect(struct wusbhc *wusbhc,
				     struct wusb_dn_hdr *dn_hdr,
				     size_t size)
{
	struct device *dev = wusbhc->dev;
	struct wusb_dn_connect *dnc;
	char pr_cdid[WUSB_CKHDID_STRSIZE];
	/* Human-readable names for the 2-bit beacon-behavior field
	 * (values 0-3; the table below indexes it directly). */
	static const char *beacon_behaviour[] = {
		"reserved",
		"self-beacon",
		"directed-beacon",
		"no-beacon"
	};

	d_fnstart(3, dev, "(%p, %p, %zu)\n", wusbhc, dn_hdr, size);
	/* A truncated DN_Connect cannot be parsed; log and drop it. */
	if (size < sizeof(*dnc)) {
		dev_err(dev, "DN CONNECT: short notification (%zu < %zu)\n",
			size, sizeof(*dnc));
		goto out;
	}

	dnc = container_of(dn_hdr, struct wusb_dn_connect, hdr);
	/* Render the CDID as hex for the log line below. */
	ckhdid_printf(pr_cdid, sizeof(pr_cdid), &dnc->CDID);
	dev_info(dev, "DN CONNECT: device %s @ %x (%s) wants to %s\n",
		 pr_cdid,
		 wusb_dn_connect_prev_dev_addr(dnc),
		 beacon_behaviour[wusb_dn_connect_beacon_behavior(dnc)],
		 wusb_dn_connect_new_connection(dnc) ? "connect" : "reconnect");
	/* ACK the connect */
	wusbhc_devconnect_ack(wusbhc, dnc, pr_cdid);
out:
	d_fnend(3, dev, "(%p, %p, %zu) = void\n",
		wusbhc, dn_hdr, size);
	return;
}
645
646/*
647 * Handle a DN_Disconnect notification (WUSB1.0[7.6.1])
648 *
649 * Device is going down -- do the disconnect.
650 *
651 * @wusbhc shall be referenced and unlocked
652 */
static void wusbhc_handle_dn_disconnect(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
{
	struct device *dev = wusbhc->dev;

	dev_info(dev, "DN DISCONNECT: device 0x%02x going down\n", wusb_dev->addr);

	/* Take the HC mutex and run the full port disconnect for the
	 * port this device sits on. */
	mutex_lock(&wusbhc->mutex);
	__wusbhc_dev_disconnect(wusbhc, wusb_port_by_idx(wusbhc, wusb_dev->port_idx));
	mutex_unlock(&wusbhc->mutex);
}
663
664/*
665 * Reset a WUSB device on a HWA
666 *
667 * @wusbhc
668 * @port_idx Index of the port where the device is
669 *
670 * In Wireless USB, a reset is more or less equivalent to a full
671 * disconnect; so we just do a full disconnect and send the device a
672 * Device Reset IE (WUSB1.0[7.5.11]) giving it a few millisecs (6 MMCs).
673 *
674 * @wusbhc should be refcounted and unlocked
675 */
676int wusbhc_dev_reset(struct wusbhc *wusbhc, u8 port_idx)
677{
678 int result;
679 struct device *dev = wusbhc->dev;
680 struct wusb_dev *wusb_dev;
681 struct wuie_reset *ie;
682
683 d_fnstart(3, dev, "(%p, %u)\n", wusbhc, port_idx);
684 mutex_lock(&wusbhc->mutex);
685 result = 0;
686 wusb_dev = wusb_port_by_idx(wusbhc, port_idx)->wusb_dev;
687 if (wusb_dev == NULL) {
688 /* reset no device? ignore */
689 dev_dbg(dev, "RESET: no device at port %u, ignoring\n",
690 port_idx);
691 goto error_unlock;
692 }
693 result = -ENOMEM;
694 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
695 if (ie == NULL)
696 goto error_unlock;
697 ie->hdr.bLength = sizeof(ie->hdr) + sizeof(ie->CDID);
698 ie->hdr.bIEIdentifier = WUIE_ID_RESET_DEVICE;
699 ie->CDID = wusb_dev->cdid;
700 result = wusbhc_mmcie_set(wusbhc, 0xff, 6, &ie->hdr);
701 if (result < 0) {
702 dev_err(dev, "RESET: cant's set MMC: %d\n", result);
703 goto error_kfree;
704 }
705 __wusbhc_dev_disconnect(wusbhc, wusb_port_by_idx(wusbhc, port_idx));
706
707 /* 120ms, hopefully 6 MMCs (FIXME) */
708 msleep(120);
709 wusbhc_mmcie_rm(wusbhc, &ie->hdr);
710error_kfree:
711 kfree(ie);
712error_unlock:
713 mutex_unlock(&wusbhc->mutex);
714 d_fnend(3, dev, "(%p, %u) = %d\n", wusbhc, port_idx, result);
715 return result;
716}
717
718/*
719 * Handle a Device Notification coming a host
720 *
721 * The Device Notification comes from a host (HWA, DWA or WHCI)
722 * wrapped in a set of headers. Somebody else has peeled off those
723 * headers for us and we just get one Device Notifications.
724 *
725 * Invalid DNs (e.g., too short) are discarded.
726 *
727 * @wusbhc shall be referenced
728 *
729 * FIXMES:
730 * - implement priorities as in WUSB1.0[Table 7-55]?
731 */
void wusbhc_handle_dn(struct wusbhc *wusbhc, u8 srcaddr,
		      struct wusb_dn_hdr *dn_hdr, size_t size)
{
	struct device *dev = wusbhc->dev;
	struct wusb_dev *wusb_dev;

	d_fnstart(3, dev, "(%p, %p)\n", wusbhc, dn_hdr);

	/* Too short to even carry a DN header: discard. */
	if (size < sizeof(struct wusb_dn_hdr)) {
		dev_err(dev, "DN data shorter than DN header (%d < %d)\n",
			(int)size, (int)sizeof(struct wusb_dn_hdr));
		goto out;
	}

	/* Only DN_Connect may come from a device we don't know yet
	 * (it has no address mapping until we ack the connect). */
	wusb_dev = wusbhc_find_dev_by_addr(wusbhc, srcaddr);
	if (wusb_dev == NULL && dn_hdr->bType != WUSB_DN_CONNECT) {
		dev_dbg(dev, "ignoring DN %d from unconnected device %02x\n",
			dn_hdr->bType, srcaddr);
		goto out;
	}

	/* Dispatch by notification type (WUSB1.0[7.6.1]). */
	switch (dn_hdr->bType) {
	case WUSB_DN_CONNECT:
		wusbhc_handle_dn_connect(wusbhc, dn_hdr, size);
		break;
	case WUSB_DN_ALIVE:
		wusbhc_handle_dn_alive(wusbhc, wusb_dev);
		break;
	case WUSB_DN_DISCONNECT:
		wusbhc_handle_dn_disconnect(wusbhc, wusb_dev);
		break;
	case WUSB_DN_MASAVAILCHANGED:
	case WUSB_DN_RWAKE:
	case WUSB_DN_SLEEP:
		/* FIXME: handle these DNs. */
		break;
	case WUSB_DN_EPRDY:
		/* The hardware handles these. */
		break;
	default:
		dev_warn(dev, "unknown DN %u (%d octets) from %u\n",
			 dn_hdr->bType, (int)size, srcaddr);
	}
out:
	d_fnend(3, dev, "(%p, %p) = void\n", wusbhc, dn_hdr);
	return;
}
EXPORT_SYMBOL_GPL(wusbhc_handle_dn);
780
781/*
782 * Disconnect a WUSB device from a the cluster
783 *
784 * @wusbhc
785 * @port Fake port where the device is (wusbhc index, not USB port number).
786 *
787 * In Wireless USB, a disconnect is basically telling the device he is
788 * being disconnected and forgetting about him.
789 *
790 * We send the device a Device Disconnect IE (WUSB1.0[7.5.11]) for 100
791 * ms and then keep going.
792 *
793 * We don't do much in case of error; we always pretend we disabled
794 * the port and disconnected the device. If physically the request
795 * didn't get there (many things can fail in the way there), the stack
796 * will reject the device's communication attempts.
797 *
798 * @wusbhc should be refcounted and locked
799 */
void __wusbhc_dev_disable(struct wusbhc *wusbhc, u8 port_idx)
{
	int result;
	struct device *dev = wusbhc->dev;
	struct wusb_dev *wusb_dev;
	struct wuie_disconnect *ie;

	d_fnstart(3, dev, "(%p, %u)\n", wusbhc, port_idx);
	result = 0;
	wusb_dev = wusb_port_by_idx(wusbhc, port_idx)->wusb_dev;
	if (wusb_dev == NULL) {
		/* reset no device? ignore */
		dev_dbg(dev, "DISCONNECT: no device at port %u, ignoring\n",
			port_idx);
		goto error;
	}
	/* Forget about the device first; the IE below is best-effort. */
	__wusbhc_dev_disconnect(wusbhc, wusb_port_by_idx(wusbhc, port_idx));

	result = -ENOMEM;
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (ie == NULL)
		goto error;
	/* Device Disconnect IE (WUSB1.0[7.5.11]) addressed by device addr. */
	ie->hdr.bLength = sizeof(*ie);
	ie->hdr.bIEIdentifier = WUIE_ID_DEVICE_DISCONNECT;
	ie->bDeviceAddress = wusb_dev->addr;
	result = wusbhc_mmcie_set(wusbhc, 0, 0, &ie->hdr);
	if (result < 0) {
		dev_err(dev, "DISCONNECT: can't set MMC: %d\n", result);
		goto error_kfree;
	}

	/* 100ms, hopefully enough MMC repetitions for the device to see it */
	msleep(100);
	wusbhc_mmcie_rm(wusbhc, &ie->hdr);
error_kfree:
	kfree(ie);
error:
	d_fnend(3, dev, "(%p, %u) = %d\n", wusbhc, port_idx, result);
	return;
}
840
841static void wusb_cap_descr_printf(const unsigned level, struct device *dev,
842 const struct usb_wireless_cap_descriptor *wcd)
843{
844 d_printf(level, dev,
845 "WUSB Capability Descriptor\n"
846 " bDevCapabilityType 0x%02x\n"
847 " bmAttributes 0x%02x\n"
848 " wPhyRates 0x%04x\n"
849 " bmTFITXPowerInfo 0x%02x\n"
850 " bmFFITXPowerInfo 0x%02x\n"
851 " bmBandGroup 0x%04x\n"
852 " bReserved 0x%02x\n",
853 wcd->bDevCapabilityType,
854 wcd->bmAttributes,
855 le16_to_cpu(wcd->wPHYRates),
856 wcd->bmTFITXPowerInfo,
857 wcd->bmFFITXPowerInfo,
858 wcd->bmBandGroup,
859 wcd->bReserved);
860}
861
862/*
863 * Walk over the BOS descriptor, verify and grok it
864 *
865 * @usb_dev: referenced
866 * @wusb_dev: referenced and unlocked
867 *
868 * The BOS descriptor is defined at WUSB1.0[7.4.1], and it defines a
869 * "flexible" way to wrap all kinds of descriptors inside an standard
870 * descriptor (wonder why they didn't use normal descriptors,
871 * btw). Not like they lack code.
872 *
873 * At the end we go to look for the WUSB Device Capabilities
874 * (WUSB1.0[7.4.1.1]) that is wrapped in a device capability descriptor
875 * that is part of the BOS descriptor set. That tells us what does the
876 * device support (dual role, beacon type, UWB PHY rates).
877 */
static int wusb_dev_bos_grok(struct usb_device *usb_dev,
			     struct wusb_dev *wusb_dev,
			     struct usb_bos_descriptor *bos, size_t desc_size)
{
	ssize_t result;
	struct device *dev = &usb_dev->dev;
	void *itr, *top;

	/* Walk over BOS capabilities, verify them */
	itr = (void *)bos + sizeof(*bos);
	top = itr + desc_size - sizeof(*bos);
	while (itr < top) {
		struct usb_dev_cap_header *cap_hdr = itr;
		size_t cap_size;
		u8 cap_type;
		/* Not even a full capability header left: malformed. */
		if (top - itr < sizeof(*cap_hdr)) {
			dev_err(dev, "Device BUG? premature end of BOS header "
				"data [offset 0x%02x]: only %zu bytes left\n",
				(int)(itr - (void *)bos), top - itr);
			result = -ENOSPC;
			goto error_bad_cap;
		}
		cap_size = cap_hdr->bLength;
		cap_type = cap_hdr->bDevCapabilityType;
		d_printf(4, dev, "BOS Capability: 0x%02x (%zu bytes)\n",
			 cap_type, cap_size);
		/* Zero-length capability: cannot advance; stop walking. */
		if (cap_size == 0)
			break;
		/* Declared length overruns the remaining buffer. */
		if (cap_size > top - itr) {
			dev_err(dev, "Device BUG? premature end of BOS data "
				"[offset 0x%02x cap %02x %zu bytes]: "
				"only %zu bytes left\n",
				(int)(itr - (void *)bos),
				cap_type, cap_size, top - itr);
			result = -EBADF;
			goto error_bad_cap;
		}
		d_dump(3, dev, itr, cap_size);
		switch (cap_type) {
		case USB_CAP_TYPE_WIRELESS_USB:
			/* The one we are after: cache a pointer into @bos
			 * (so @bos must outlive wusb_cap_descr). */
			if (cap_size != sizeof(*wusb_dev->wusb_cap_descr))
				dev_err(dev, "Device BUG? WUSB Capability "
					"descriptor is %zu bytes vs %zu "
					"needed\n", cap_size,
					sizeof(*wusb_dev->wusb_cap_descr));
			else {
				wusb_dev->wusb_cap_descr = itr;
				wusb_cap_descr_printf(3, dev, itr);
			}
			break;
		default:
			dev_err(dev, "BUG? Unknown BOS capability 0x%02x "
				"(%zu bytes) at offset 0x%02x\n", cap_type,
				cap_size, (int)(itr - (void *)bos));
		}
		itr += cap_size;
	}
	result = 0;
error_bad_cap:
	return result;
}
939
940/*
941 * Add information from the BOS descriptors to the device
942 *
943 * @usb_dev: referenced
944 * @wusb_dev: referenced and unlocked
945 *
 * So what we do is we alloc a space for the BOS descriptor of 32
 * bytes (matching alloc_size below); read the first four bytes which
 * include the wTotalLength
948 * field (WUSB1.0[T7-26]) and if it fits in those 64 bytes, read the
949 * whole thing. If not we realloc to that size.
950 *
951 * Then we call the groking function, that will fill up
952 * wusb_dev->wusb_cap_descr, which is what we'll need later on.
953 */
static int wusb_dev_bos_add(struct usb_device *usb_dev,
			    struct wusb_dev *wusb_dev)
{
	ssize_t result;
	struct device *dev = &usb_dev->dev;
	struct usb_bos_descriptor *bos;
	size_t alloc_size = 32, desc_size = 4;

	bos = kmalloc(alloc_size, GFP_KERNEL);
	if (bos == NULL)
		return -ENOMEM;
	/* Phase 1: read just enough (4 bytes) to learn wTotalLength. */
	result = usb_get_descriptor(usb_dev, USB_DT_BOS, 0, bos, desc_size);
	if (result < 4) {
		dev_err(dev, "Can't get BOS descriptor or too short: %zd\n",
			result);
		goto error_get_descriptor;
	}
	desc_size = le16_to_cpu(bos->wTotalLength);
	/* Grow the buffer if the full descriptor set doesn't fit. */
	if (desc_size >= alloc_size) {
		kfree(bos);
		alloc_size = desc_size;
		bos = kmalloc(alloc_size, GFP_KERNEL);
		if (bos == NULL)
			return -ENOMEM;
	}
	/* Phase 2: read the whole descriptor set. */
	result = usb_get_descriptor(usb_dev, USB_DT_BOS, 0, bos, desc_size);
	if (result < 0 || result != desc_size) {
		dev_err(dev, "Can't get BOS descriptor or too short (need "
			"%zu bytes): %zd\n", desc_size, result);
		goto error_get_descriptor;
	}
	/* Sanity-check: the header must be complete and the re-read
	 * wTotalLength must agree with the phase-1 value. */
	if (result < sizeof(*bos)
	    || le16_to_cpu(bos->wTotalLength) != desc_size) {
		dev_err(dev, "Can't get BOS descriptor or too short (need "
			"%zu bytes): %zd\n", desc_size, result);
		goto error_get_descriptor;
	}
	d_printf(2, dev, "Got BOS descriptor %zd bytes, %u capabilities\n",
		 result, bos->bNumDeviceCaps);
	d_dump(2, dev, bos, result);
	/* Parse the capability list; on success wusb_cap_descr points
	 * into @bos, so @bos is kept alive in wusb_dev->bos. */
	result = wusb_dev_bos_grok(usb_dev, wusb_dev, bos, result);
	if (result < 0)
		goto error_bad_bos;
	wusb_dev->bos = bos;
	return 0;

error_bad_bos:
error_get_descriptor:
	kfree(bos);
	wusb_dev->wusb_cap_descr = NULL;
	return result;
}
1006
1007static void wusb_dev_bos_rm(struct wusb_dev *wusb_dev)
1008{
1009 kfree(wusb_dev->bos);
1010 wusb_dev->wusb_cap_descr = NULL;
1011};
1012
/*
 * Conservative Wireless USB capability descriptor: no beaconing, only
 * the lowest PHY rate (53 Mbps), bottom band group (WUSB1.0[7.4.1]).
 * NOTE(review): presumably used as a fallback when a device's own
 * WUSB capability descriptor is missing/unusable -- confirm at the
 * usage sites (not visible in this chunk).
 */
static struct usb_wireless_cap_descriptor wusb_cap_descr_default = {
	.bLength = sizeof(wusb_cap_descr_default),
	.bDescriptorType = USB_DT_DEVICE_CAPABILITY,
	.bDevCapabilityType = USB_CAP_TYPE_WIRELESS_USB,

	.bmAttributes = USB_WIRELESS_BEACON_NONE,
	.wPHYRates = cpu_to_le16(USB_WIRELESS_PHY_53),
	.bmTFITXPowerInfo = 0,
	.bmFFITXPowerInfo = 0,
	.bmBandGroup = cpu_to_le16(0x0001),	/* WUSB1.0[7.4.1] bottom */
	.bReserved = 0
};
1025
1026/*
1027 * USB stack's device addition Notifier Callback
1028 *
1029 * Called from drivers/usb/core/hub.c when a new device is added; we
1030 * use this hook to perform certain WUSB specific setup work on the
1031 * new device. As well, it is the first time we can connect the
1032 * wusb_dev and the usb_dev. So we note it down in wusb_dev and take a
1033 * reference that we'll drop.
1034 *
1035 * First we need to determine if the device is a WUSB device (else we
1036 * ignore it). For that we use the speed setting (USB_SPEED_VARIABLE)
1037 * [FIXME: maybe we'd need something more definitive]. If so, we track
 * its usb_bus and from there, the WUSB HC.
1039 *
1040 * Because all WUSB HCs are contained in a 'struct wusbhc', voila, we
1041 * get the wusbhc for the device.
1042 *
1043 * We have a reference on @usb_dev (as we are called at the end of its
1044 * enumeration).
1045 *
1046 * NOTE: @usb_dev locked
1047 */
1048static void wusb_dev_add_ncb(struct usb_device *usb_dev)
1049{
1050 int result = 0;
1051 struct wusb_dev *wusb_dev;
1052 struct wusbhc *wusbhc;
1053 struct device *dev = &usb_dev->dev;
1054 u8 port_idx;
1055
1056 if (usb_dev->wusb == 0 || usb_dev->devnum == 1)
1057 return; /* skip non wusb and wusb RHs */
1058
1059 d_fnstart(3, dev, "(usb_dev %p)\n", usb_dev);
1060
1061 wusbhc = wusbhc_get_by_usb_dev(usb_dev);
1062 if (wusbhc == NULL)
1063 goto error_nodev;
1064 mutex_lock(&wusbhc->mutex);
1065 wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, usb_dev);
1066 port_idx = wusb_port_no_to_idx(usb_dev->portnum);
1067 mutex_unlock(&wusbhc->mutex);
1068 if (wusb_dev == NULL)
1069 goto error_nodev;
1070 wusb_dev->usb_dev = usb_get_dev(usb_dev);
1071 usb_dev->wusb_dev = wusb_dev_get(wusb_dev);
1072 result = wusb_dev_sec_add(wusbhc, usb_dev, wusb_dev);
1073 if (result < 0) {
1074 dev_err(dev, "Cannot enable security: %d\n", result);
1075 goto error_sec_add;
1076 }
1077 /* Now query the device for it's BOS and attach it to wusb_dev */
1078 result = wusb_dev_bos_add(usb_dev, wusb_dev);
1079 if (result < 0) {
1080 dev_err(dev, "Cannot get BOS descriptors: %d\n", result);
1081 goto error_bos_add;
1082 }
1083 result = wusb_dev_sysfs_add(wusbhc, usb_dev, wusb_dev);
1084 if (result < 0)
1085 goto error_add_sysfs;
1086out:
1087 wusb_dev_put(wusb_dev);
1088 wusbhc_put(wusbhc);
1089error_nodev:
1090 d_fnend(3, dev, "(usb_dev %p) = void\n", usb_dev);
1091 return;
1092
1093 wusb_dev_sysfs_rm(wusb_dev);
1094error_add_sysfs:
1095 wusb_dev_bos_rm(wusb_dev);
1096error_bos_add:
1097 wusb_dev_sec_rm(wusb_dev);
1098error_sec_add:
1099 mutex_lock(&wusbhc->mutex);
1100 __wusbhc_dev_disconnect(wusbhc, wusb_port_by_idx(wusbhc, port_idx));
1101 mutex_unlock(&wusbhc->mutex);
1102 goto out;
1103}
1104
1105/*
1106 * Undo all the steps done at connection by the notifier callback
1107 *
1108 * NOTE: @usb_dev locked
1109 */
static void wusb_dev_rm_ncb(struct usb_device *usb_dev)
{
	struct wusb_dev *wusb_dev = usb_dev->wusb_dev;

	/* Same filter as wusb_dev_add_ncb() */
	if (usb_dev->wusb == 0 || usb_dev->devnum == 1)
		return;		/* skip non wusb and wusb RHs */

	/* Tear down in reverse order of wusb_dev_add_ncb() */
	wusb_dev_sysfs_rm(wusb_dev);
	wusb_dev_bos_rm(wusb_dev);
	wusb_dev_sec_rm(wusb_dev);
	/* Break the usb_dev <-> wusb_dev cross links and drop the
	 * references taken when they were established. */
	wusb_dev->usb_dev = NULL;
	usb_dev->wusb_dev = NULL;
	wusb_dev_put(wusb_dev);
	usb_put_dev(usb_dev);
}
1125
1126/*
1127 * Handle notifications from the USB stack (notifier call back)
1128 *
1129 * This is called when the USB stack does a
1130 * usb_{bus,device}_{add,remove}() so we can do WUSB specific
 * handling. For the case of USB_DEVICE_{ADD,REMOVE} it is called
 * with the usb_dev locked.
1133 */
1134int wusb_usb_ncb(struct notifier_block *nb, unsigned long val,
1135 void *priv)
1136{
1137 int result = NOTIFY_OK;
1138
1139 switch (val) {
1140 case USB_DEVICE_ADD:
1141 wusb_dev_add_ncb(priv);
1142 break;
1143 case USB_DEVICE_REMOVE:
1144 wusb_dev_rm_ncb(priv);
1145 break;
1146 case USB_BUS_ADD:
1147 /* ignore (for now) */
1148 case USB_BUS_REMOVE:
1149 break;
1150 default:
1151 WARN_ON(1);
1152 result = NOTIFY_BAD;
1153 };
1154 return result;
1155}
1156
1157/*
1158 * Return a referenced wusb_dev given a @wusbhc and @usb_dev
1159 */
1160struct wusb_dev *__wusb_dev_get_by_usb_dev(struct wusbhc *wusbhc,
1161 struct usb_device *usb_dev)
1162{
1163 struct wusb_dev *wusb_dev;
1164 u8 port_idx;
1165
1166 port_idx = wusb_port_no_to_idx(usb_dev->portnum);
1167 BUG_ON(port_idx > wusbhc->ports_max);
1168 wusb_dev = wusb_port_by_idx(wusbhc, port_idx)->wusb_dev;
1169 if (wusb_dev != NULL) /* ops, device is gone */
1170 wusb_dev_get(wusb_dev);
1171 return wusb_dev;
1172}
1173EXPORT_SYMBOL_GPL(__wusb_dev_get_by_usb_dev);
1174
/* kref release callback: final teardown once the last reference drops */
void wusb_dev_destroy(struct kref *_wusb_dev)
{
	struct wusb_dev *wusb_dev
		= container_of(_wusb_dev, struct wusb_dev, refcnt);
	/* NOTE(review): cack_node is unlinked here without any visible
	 * lock -- presumably by refcount construction nobody else can
	 * reach the node at this point; confirm against the cack_list
	 * users. */
	list_del_init(&wusb_dev->cack_node);
	wusb_dev_free(wusb_dev);
	/* only the (now stale) pointer value is printed, not dereferenced */
	d_fnend(1, NULL, "%s (wusb_dev %p) = void\n", __func__, wusb_dev);
}
EXPORT_SYMBOL_GPL(wusb_dev_destroy);
1184
1185/*
1186 * Create all the device connect handling infrastructure
1187 *
1188 * This is basically the device info array, Connect Acknowledgement
1189 * (cack) lists, keep-alive timers (and delayed work thread).
1190 */
1191int wusbhc_devconnect_create(struct wusbhc *wusbhc)
1192{
1193 d_fnstart(3, wusbhc->dev, "(wusbhc %p)\n", wusbhc);
1194
1195 wusbhc->keep_alive_ie.hdr.bIEIdentifier = WUIE_ID_KEEP_ALIVE;
1196 wusbhc->keep_alive_ie.hdr.bLength = sizeof(wusbhc->keep_alive_ie.hdr);
1197 INIT_DELAYED_WORK(&wusbhc->keep_alive_timer, wusbhc_keep_alive_run);
1198
1199 wusbhc->cack_ie.hdr.bIEIdentifier = WUIE_ID_CONNECTACK;
1200 wusbhc->cack_ie.hdr.bLength = sizeof(wusbhc->cack_ie.hdr);
1201 INIT_LIST_HEAD(&wusbhc->cack_list);
1202
1203 d_fnend(3, wusbhc->dev, "(wusbhc %p) = void\n", wusbhc);
1204 return 0;
1205}
1206
1207/*
1208 * Release all resources taken by the devconnect stuff
1209 */
void wusbhc_devconnect_destroy(struct wusbhc *wusbhc)
{
	/* Placeholder: wusbhc_devconnect_create() only initializes
	 * fields embedded in *wusbhc, so there is nothing to free. */
	d_fnstart(3, wusbhc->dev, "(wusbhc %p)\n", wusbhc);
	d_fnend(3, wusbhc->dev, "(wusbhc %p) = void\n", wusbhc);
}
1215
1216/*
1217 * wusbhc_devconnect_start - start accepting device connections
1218 * @wusbhc: the WUSB HC
1219 *
1220 * Sets the Host Info IE to accept all new connections.
1221 *
1222 * FIXME: This also enables the keep alives but this is not necessary
1223 * until there are connected and authenticated devices.
1224 */
1225int wusbhc_devconnect_start(struct wusbhc *wusbhc,
1226 const struct wusb_ckhdid *chid)
1227{
1228 struct device *dev = wusbhc->dev;
1229 struct wuie_host_info *hi;
1230 int result;
1231
1232 hi = kzalloc(sizeof(*hi), GFP_KERNEL);
1233 if (hi == NULL)
1234 return -ENOMEM;
1235
1236 hi->hdr.bLength = sizeof(*hi);
1237 hi->hdr.bIEIdentifier = WUIE_ID_HOST_INFO;
1238 hi->attributes = cpu_to_le16((wusbhc->rsv->stream << 3) | WUIE_HI_CAP_ALL);
1239 hi->CHID = *chid;
1240 result = wusbhc_mmcie_set(wusbhc, 0, 0, &hi->hdr);
1241 if (result < 0) {
1242 dev_err(dev, "Cannot add Host Info MMCIE: %d\n", result);
1243 goto error_mmcie_set;
1244 }
1245 wusbhc->wuie_host_info = hi;
1246
1247 queue_delayed_work(wusbd, &wusbhc->keep_alive_timer,
1248 (wusbhc->trust_timeout*CONFIG_HZ)/1000/2);
1249
1250 return 0;
1251
1252error_mmcie_set:
1253 kfree(hi);
1254 return result;
1255}
1256
1257/*
1258 * wusbhc_devconnect_stop - stop managing connected devices
1259 * @wusbhc: the WUSB HC
1260 *
1261 * Removes the Host Info IE and stops the keep alives.
1262 *
1263 * FIXME: should this disconnect all devices?
1264 */
1265void wusbhc_devconnect_stop(struct wusbhc *wusbhc)
1266{
1267 cancel_delayed_work_sync(&wusbhc->keep_alive_timer);
1268 WARN_ON(!list_empty(&wusbhc->cack_list));
1269
1270 wusbhc_mmcie_rm(wusbhc, &wusbhc->wuie_host_info->hdr);
1271 kfree(wusbhc->wuie_host_info);
1272 wusbhc->wuie_host_info = NULL;
1273}
1274
1275/*
1276 * wusb_set_dev_addr - set the WUSB device address used by the host
1277 * @wusbhc: the WUSB HC the device is connect to
1278 * @wusb_dev: the WUSB device
1279 * @addr: new device address
1280 */
1281int wusb_set_dev_addr(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev, u8 addr)
1282{
1283 int result;
1284
1285 wusb_dev->addr = addr;
1286 result = wusbhc->dev_info_set(wusbhc, wusb_dev);
1287 if (result < 0)
1288 dev_err(wusbhc->dev, "device %d: failed to set device "
1289 "address\n", wusb_dev->port_idx);
1290 else
1291 dev_info(wusbhc->dev, "device %d: %s addr %u\n",
1292 wusb_dev->port_idx,
1293 (addr & WUSB_DEV_ADDR_UNAUTH) ? "unauth" : "auth",
1294 wusb_dev->addr);
1295
1296 return result;
1297}
diff --git a/drivers/usb/wusbcore/mmc.c b/drivers/usb/wusbcore/mmc.c
new file mode 100644
index 000000000000..cfa77a01cebd
--- /dev/null
+++ b/drivers/usb/wusbcore/mmc.c
@@ -0,0 +1,321 @@
1/*
2 * WUSB Wire Adapter: Control/Data Streaming Interface (WUSB[8])
3 * MMC (Microscheduled Management Command) handling
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * WUIEs and MMC IEs...well, they are almost the same at the end. MMC
24 * IEs are Wireless USB IEs that go into the MMC period...[what is
25 * that? look in Design-overview.txt].
26 *
27 *
28 * This is a simple subsystem to keep track of which IEs are being
29 * sent by the host in the MMC period.
30 *
31 * For each WUIE we ask to send, we keep it in an array, so we can
32 * request its removal later, or replace the content. They are tracked
33 * by pointer, so be sure to use the same pointer if you want to
34 * remove it or update the contents.
35 *
36 * FIXME:
37 * - add timers that autoremove intervalled IEs?
38 */
39#include <linux/usb/wusb.h>
40#include "wusbhc.h"
41
42/* Initialize the MMCIEs handling mechanism */
43int wusbhc_mmcie_create(struct wusbhc *wusbhc)
44{
45 u8 mmcies = wusbhc->mmcies_max;
46 wusbhc->mmcie = kcalloc(mmcies, sizeof(wusbhc->mmcie[0]), GFP_KERNEL);
47 if (wusbhc->mmcie == NULL)
48 return -ENOMEM;
49 mutex_init(&wusbhc->mmcie_mutex);
50 return 0;
51}
52
53/* Release resources used by the MMCIEs handling mechanism */
54void wusbhc_mmcie_destroy(struct wusbhc *wusbhc)
55{
56 kfree(wusbhc->mmcie);
57}
58
59/*
60 * Add or replace an MMC Wireless USB IE.
61 *
62 * @interval: See WUSB1.0[8.5.3.1]
63 * @repeat_cnt: See WUSB1.0[8.5.3.1]
64 * @handle: See WUSB1.0[8.5.3.1]
65 * @wuie: Pointer to the header of the WUSB IE data to add.
66 * MUST BE allocated in a kmalloc buffer (no stack or
67 * vmalloc).
68 * THE CALLER ALWAYS OWNS THE POINTER (we don't free it
69 * on remove, we just forget about it).
70 * @returns: 0 if ok, < 0 errno code on error.
71 *
72 * Goes over the *whole* @wusbhc->mmcie array looking for (a) the
73 * first free spot and (b) if @wuie is already in the array (aka:
74 * transmitted in the MMCs) the spot were it is.
75 *
76 * If present, we "overwrite it" (update).
77 *
78 *
79 * NOTE: Need special ordering rules -- see below WUSB1.0 Table 7-38.
80 * The host uses the handle as the 'sort' index. We
81 * allocate the last one always for the WUIE_ID_HOST_INFO, and
82 * the rest, first come first serve in inverse order.
83 *
84 * Host software must make sure that it adds the other IEs in
85 * the right order... the host hardware is responsible for
86 * placing the WCTA IEs in the right place with the other IEs
87 * set by host software.
88 *
89 * NOTE: we can access wusbhc->wa_descr without locking because it is
90 * read only.
91 */
int wusbhc_mmcie_set(struct wusbhc *wusbhc, u8 interval, u8 repeat_cnt,
		     struct wuie_hdr *wuie)
{
	int result = -ENOBUFS;
	unsigned handle, itr;

	/* Search a handle, taking into account the ordering */
	mutex_lock(&wusbhc->mmcie_mutex);
	switch (wuie->bIEIdentifier) {
	case WUIE_ID_HOST_INFO:
		/* Always last */
		handle = wusbhc->mmcies_max - 1;
		break;
	case WUIE_ID_ISOCH_DISCARD:
		dev_err(wusbhc->dev, "Special ordering case for WUIE ID 0x%x "
			"unimplemented\n", wuie->bIEIdentifier);
		result = -ENOSYS;
		goto error_unlock;
	default:
		/* search for it or find the last empty slot.  Note the
		 * loop deliberately does NOT stop at the first NULL
		 * slot: new IEs get the *highest* free handle below the
		 * HOST_INFO one, per the ordering NOTE in the header
		 * comment. */
		handle = ~0;
		for (itr = 0; itr < wusbhc->mmcies_max - 1; itr++) {
			if (wusbhc->mmcie[itr] == wuie) {
				handle = itr;	/* already present: update */
				break;
			}
			if (wusbhc->mmcie[itr] == NULL)
				handle = itr;
		}
		if (handle == ~0)	/* not present and no free slot */
			goto error_unlock;	/* -> -ENOBUFS */
	}
	result = (wusbhc->mmcie_add)(wusbhc, interval, repeat_cnt, handle,
				     wuie);
	if (result >= 0)
		wusbhc->mmcie[handle] = wuie;	/* caller keeps ownership */
error_unlock:
	mutex_unlock(&wusbhc->mmcie_mutex);
	return result;
}
EXPORT_SYMBOL_GPL(wusbhc_mmcie_set);
133
134/*
135 * Remove an MMC IE previously added with wusbhc_mmcie_set()
136 *
137 * @wuie Pointer used to add the WUIE
138 */
139void wusbhc_mmcie_rm(struct wusbhc *wusbhc, struct wuie_hdr *wuie)
140{
141 int result;
142 unsigned handle, itr;
143
144 mutex_lock(&wusbhc->mmcie_mutex);
145 for (itr = 0; itr < wusbhc->mmcies_max; itr++) {
146 if (wusbhc->mmcie[itr] == wuie) {
147 handle = itr;
148 goto found;
149 }
150 }
151 mutex_unlock(&wusbhc->mmcie_mutex);
152 return;
153
154found:
155 result = (wusbhc->mmcie_rm)(wusbhc, handle);
156 if (result == 0)
157 wusbhc->mmcie[itr] = NULL;
158 mutex_unlock(&wusbhc->mmcie_mutex);
159}
160EXPORT_SYMBOL_GPL(wusbhc_mmcie_rm);
161
162/*
163 * wusbhc_start - start transmitting MMCs and accepting connections
164 * @wusbhc: the HC to start
165 * @chid: the CHID to use for this host
166 *
167 * Establishes a cluster reservation, enables device connections, and
168 * starts MMCs with appropriate DNTS parameters.
169 */
170int wusbhc_start(struct wusbhc *wusbhc, const struct wusb_ckhdid *chid)
171{
172 int result;
173 struct device *dev = wusbhc->dev;
174
175 WARN_ON(wusbhc->wuie_host_info != NULL);
176
177 result = wusbhc_rsv_establish(wusbhc);
178 if (result < 0) {
179 dev_err(dev, "cannot establish cluster reservation: %d\n",
180 result);
181 goto error_rsv_establish;
182 }
183
184 result = wusbhc_devconnect_start(wusbhc, chid);
185 if (result < 0) {
186 dev_err(dev, "error enabling device connections: %d\n", result);
187 goto error_devconnect_start;
188 }
189
190 result = wusbhc_sec_start(wusbhc);
191 if (result < 0) {
192 dev_err(dev, "error starting security in the HC: %d\n", result);
193 goto error_sec_start;
194 }
195 /* FIXME: the choice of the DNTS parameters is somewhat
196 * arbitrary */
197 result = wusbhc->set_num_dnts(wusbhc, 0, 15);
198 if (result < 0) {
199 dev_err(dev, "Cannot set DNTS parameters: %d\n", result);
200 goto error_set_num_dnts;
201 }
202 result = wusbhc->start(wusbhc);
203 if (result < 0) {
204 dev_err(dev, "error starting wusbch: %d\n", result);
205 goto error_wusbhc_start;
206 }
207 wusbhc->active = 1;
208 return 0;
209
210error_wusbhc_start:
211 wusbhc_sec_stop(wusbhc);
212error_set_num_dnts:
213error_sec_start:
214 wusbhc_devconnect_stop(wusbhc);
215error_devconnect_start:
216 wusbhc_rsv_terminate(wusbhc);
217error_rsv_establish:
218 return result;
219}
220
221/*
222 * Disconnect all from the WUSB Channel
223 *
224 * Send a Host Disconnect IE in the MMC, wait, don't send it any more
225 */
226static int __wusbhc_host_disconnect_ie(struct wusbhc *wusbhc)
227{
228 int result = -ENOMEM;
229 struct wuie_host_disconnect *host_disconnect_ie;
230 might_sleep();
231 host_disconnect_ie = kmalloc(sizeof(*host_disconnect_ie), GFP_KERNEL);
232 if (host_disconnect_ie == NULL)
233 goto error_alloc;
234 host_disconnect_ie->hdr.bLength = sizeof(*host_disconnect_ie);
235 host_disconnect_ie->hdr.bIEIdentifier = WUIE_ID_HOST_DISCONNECT;
236 result = wusbhc_mmcie_set(wusbhc, 0, 0, &host_disconnect_ie->hdr);
237 if (result < 0)
238 goto error_mmcie_set;
239
240 /* WUSB1.0[8.5.3.1 & 7.5.2] */
241 msleep(100);
242 wusbhc_mmcie_rm(wusbhc, &host_disconnect_ie->hdr);
243error_mmcie_set:
244 kfree(host_disconnect_ie);
245error_alloc:
246 return result;
247}
248
249/*
250 * wusbhc_stop - stop transmitting MMCs
251 * @wusbhc: the HC to stop
252 *
253 * Send a Host Disconnect IE, wait, remove all the MMCs (stop sending MMCs).
254 *
255 * If we can't allocate a Host Stop IE, screw it, we don't notify the
256 * devices we are disconnecting...
257 */
258void wusbhc_stop(struct wusbhc *wusbhc)
259{
260 if (wusbhc->active) {
261 wusbhc->active = 0;
262 wusbhc->stop(wusbhc);
263 wusbhc_sec_stop(wusbhc);
264 __wusbhc_host_disconnect_ie(wusbhc);
265 wusbhc_devconnect_stop(wusbhc);
266 wusbhc_rsv_terminate(wusbhc);
267 }
268}
269EXPORT_SYMBOL_GPL(wusbhc_stop);
270
271/*
272 * Change the CHID in a WUSB Channel
273 *
274 * If it is just a new CHID, send a Host Disconnect IE and then change
275 * the CHID IE.
276 */
static int __wusbhc_chid_change(struct wusbhc *wusbhc,
				const struct wusb_ckhdid *chid)
{
	int result = -ENOSYS;
	struct device *dev = wusbhc->dev;
	/* Deliberately stubbed: report and bail with -ENOSYS.
	 * Everything after the return below is unreachable and kept
	 * only as a sketch of the intended implementation. */
	dev_err(dev, "%s() not implemented yet\n", __func__);
	return result;

	BUG_ON(wusbhc->wuie_host_info == NULL);
	__wusbhc_host_disconnect_ie(wusbhc);
	wusbhc->wuie_host_info->CHID = *chid;
	result = wusbhc_mmcie_set(wusbhc, 0, 0, &wusbhc->wuie_host_info->hdr);
	if (result < 0)
		dev_err(dev, "Can't update Host Info WUSB IE: %d\n", result);
	return result;
}
293
294/*
295 * Set/reset/update a new CHID
296 *
297 * Depending on the previous state of the MMCs, start, stop or change
298 * the sent MMC. This effectively switches the host controller on and
299 * off (radio wise).
300 */
301int wusbhc_chid_set(struct wusbhc *wusbhc, const struct wusb_ckhdid *chid)
302{
303 int result = 0;
304
305 if (memcmp(chid, &wusb_ckhdid_zero, sizeof(chid)) == 0)
306 chid = NULL;
307
308 mutex_lock(&wusbhc->mutex);
309 if (wusbhc->active) {
310 if (chid)
311 result = __wusbhc_chid_change(wusbhc, chid);
312 else
313 wusbhc_stop(wusbhc);
314 } else {
315 if (chid)
316 wusbhc_start(wusbhc, chid);
317 }
318 mutex_unlock(&wusbhc->mutex);
319 return result;
320}
321EXPORT_SYMBOL_GPL(wusbhc_chid_set);
diff --git a/drivers/usb/wusbcore/pal.c b/drivers/usb/wusbcore/pal.c
new file mode 100644
index 000000000000..7cc51e9905cf
--- /dev/null
+++ b/drivers/usb/wusbcore/pal.c
@@ -0,0 +1,42 @@
1/*
2 * Wireless USB Host Controller
3 * UWB Protocol Adaptation Layer (PAL) glue.
4 *
5 * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19#include "wusbhc.h"
20
21/**
22 * wusbhc_pal_register - register the WUSB HC as a UWB PAL
23 * @wusbhc: the WUSB HC
24 */
25int wusbhc_pal_register(struct wusbhc *wusbhc)
26{
27 uwb_pal_init(&wusbhc->pal);
28
29 wusbhc->pal.name = "wusbhc";
30 wusbhc->pal.device = wusbhc->usb_hcd.self.controller;
31
32 return uwb_pal_register(wusbhc->uwb_rc, &wusbhc->pal);
33}
34
/**
 * wusbhc_pal_unregister - unregister the WUSB HC as a UWB PAL
 * @wusbhc: the WUSB HC
 */
void wusbhc_pal_unregister(struct wusbhc *wusbhc)
{
	uwb_pal_unregister(wusbhc->uwb_rc, &wusbhc->pal);
}
diff --git a/drivers/usb/wusbcore/reservation.c b/drivers/usb/wusbcore/reservation.c
new file mode 100644
index 000000000000..fc63e77ded2d
--- /dev/null
+++ b/drivers/usb/wusbcore/reservation.c
@@ -0,0 +1,115 @@
1/*
2 * WUSB cluster reservation management
3 *
4 * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License version
8 * 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18#include <linux/kernel.h>
19#include <linux/uwb.h>
20
21#include "wusbhc.h"
22
23/*
24 * WUSB cluster reservations are multicast reservations with the
25 * broadcast cluster ID (BCID) as the target DevAddr.
26 *
27 * FIXME: consider adjusting the reservation depending on what devices
28 * are attached.
29 */
30
31static int wusbhc_bwa_set(struct wusbhc *wusbhc, u8 stream,
32 const struct uwb_mas_bm *mas)
33{
34 if (mas == NULL)
35 mas = &uwb_mas_bm_zero;
36 return wusbhc->bwa_set(wusbhc, stream, mas);
37}
38
39/**
40 * wusbhc_rsv_complete_cb - WUSB HC reservation complete callback
41 * @rsv: the reservation
42 *
43 * Either set or clear the HC's view of the reservation.
44 *
45 * FIXME: when a reservation is denied the HC should be stopped.
46 */
static void wusbhc_rsv_complete_cb(struct uwb_rsv *rsv)
{
	struct wusbhc *wusbhc = rsv->pal_priv;
	struct device *dev = wusbhc->dev;
	char buf[72];	/* textual MAS bitmap for the debug message */

	switch (rsv->state) {
	case UWB_RSV_STATE_O_ESTABLISHED:
		/* Reservation granted: program it into the HC */
		bitmap_scnprintf(buf, sizeof(buf), rsv->mas.bm, UWB_NUM_MAS);
		dev_dbg(dev, "established reservation: %s\n", buf);
		wusbhc_bwa_set(wusbhc, rsv->stream, &rsv->mas);
		break;
	case UWB_RSV_STATE_NONE:
		/* Reservation gone: clear the HC's allocation and drop
		 * our record of it */
		dev_dbg(dev, "removed reservation\n");
		wusbhc_bwa_set(wusbhc, 0, NULL);
		wusbhc->rsv = NULL;
		break;
	default:
		/* intermediate negotiation states: nothing to do yet */
		dev_dbg(dev, "unexpected reservation state: %d\n", rsv->state);
		break;
	}
}
69
70
71/**
72 * wusbhc_rsv_establish - establish a reservation for the cluster
73 * @wusbhc: the WUSB HC requesting a bandwith reservation
74 */
75int wusbhc_rsv_establish(struct wusbhc *wusbhc)
76{
77 struct uwb_rc *rc = wusbhc->uwb_rc;
78 struct uwb_rsv *rsv;
79 struct uwb_dev_addr bcid;
80 int ret;
81
82 rsv = uwb_rsv_create(rc, wusbhc_rsv_complete_cb, wusbhc);
83 if (rsv == NULL)
84 return -ENOMEM;
85
86 bcid.data[0] = wusbhc->cluster_id;
87 bcid.data[1] = 0;
88
89 rsv->owner = &rc->uwb_dev;
90 rsv->target.type = UWB_RSV_TARGET_DEVADDR;
91 rsv->target.devaddr = bcid;
92 rsv->type = UWB_DRP_TYPE_PRIVATE;
93 rsv->max_mas = 256;
94 rsv->min_mas = 16; /* one MAS per zone? */
95 rsv->sparsity = 16; /* at least one MAS in each zone? */
96 rsv->is_multicast = true;
97
98 ret = uwb_rsv_establish(rsv);
99 if (ret == 0)
100 wusbhc->rsv = rsv;
101 else
102 uwb_rsv_destroy(rsv);
103 return ret;
104}
105
106
107/**
108 * wusbhc_rsv_terminate - terminate any cluster reservation
109 * @wusbhc: the WUSB host whose reservation is to be terminated
110 */
111void wusbhc_rsv_terminate(struct wusbhc *wusbhc)
112{
113 if (wusbhc->rsv)
114 uwb_rsv_terminate(wusbhc->rsv);
115}
diff --git a/drivers/usb/wusbcore/rh.c b/drivers/usb/wusbcore/rh.c
new file mode 100644
index 000000000000..267a64325106
--- /dev/null
+++ b/drivers/usb/wusbcore/rh.c
@@ -0,0 +1,477 @@
1/*
2 * Wireless USB Host Controller
3 * Root Hub operations
4 *
5 *
6 * Copyright (C) 2005-2006 Intel Corporation
7 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License version
11 * 2 as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21 * 02110-1301, USA.
22 *
23 *
24 * We fake a root hub that has fake ports (as many as simultaneous
25 * devices the Wireless USB Host Controller can deal with). For each
 * port we keep a state in @wusbhc->port[index] identical to the one
 * specified in the USB2.0[ch11] spec and some extra device
 * information that complements the one in 'struct usb_device' (as
 * this lacks a hcpriv pointer).
30 *
31 * Note this is common to WHCI and HWA host controllers.
32 *
33 * Through here we enable most of the state changes that the USB stack
34 * will use to connect or disconnect devices. We need to do some
35 * forced adaptation of Wireless USB device states vs. wired:
36 *
37 * USB: WUSB:
38 *
39 * Port Powered-off port slot n/a
40 * Powered-on port slot available
41 * Disconnected port slot available
42 * Connected port slot assigned device
43 * device sent DN_Connect
44 * device was authenticated
45 * Enabled device is authenticated, transitioned
46 * from unauth -> auth -> default address
47 * -> enabled
48 * Reset disconnect
49 * Disable disconnect
50 *
51 * This maps the standard USB port states with the WUSB device states
52 * so we can fake ports without having to modify the USB stack.
53 *
54 * FIXME: this process will change in the future
55 *
56 *
57 * ENTRY POINTS
58 *
59 * Our entry points into here are, as in hcd.c, the USB stack root hub
60 * ops defined in the usb_hcd struct:
61 *
62 * wusbhc_rh_status_data() Provide hub and port status data bitmap
63 *
64 * wusbhc_rh_control() Execution of all the major requests
65 * you can do to a hub (Set|Clear
66 * features, get descriptors, status, etc).
67 *
68 * wusbhc_rh_[suspend|resume]() That
69 *
70 * wusbhc_rh_start_port_reset() ??? unimplemented
71 */
72#include "wusbhc.h"
73
74#define D_LOCAL 0
75#include <linux/uwb/debug.h>
76
77/*
78 * Reset a fake port
79 *
80 * This can be called to reset a port from any other state or to reset
81 * it when connecting. In Wireless USB they are different; when doing
82 * a new connect that involves going over the authentication. When
 * just resetting, it's a different story.
84 *
85 * The Linux USB stack resets a port twice before it considers it
86 * enabled, so we have to detect and ignore that.
87 *
88 * @wusbhc is assumed referenced and @wusbhc->mutex unlocked.
89 *
 * Supposedly we are the only thread accessing @wusbhc->port; in any
91 * case, maybe we should move the mutex locking from
92 * wusbhc_devconnect_auth() to here.
93 *
94 * @port_idx refers to the wusbhc's port index, not the USB port number
95 */
static int wusbhc_rh_port_reset(struct wusbhc *wusbhc, u8 port_idx)
{
	int result = 0;
	struct wusb_port *port = wusb_port_by_idx(wusbhc, port_idx);

	d_fnstart(3, wusbhc->dev, "(wusbhc %p port_idx %u)\n",
		  wusbhc, port_idx);
	if (port->reset_count == 0) {
		/* first reset of a new connection: run WUSB
		 * authentication instead of a plain reset */
		wusbhc_devconnect_auth(wusbhc, port_idx);
		port->reset_count++;
	} else if (port->reset_count == 1)
		/* see header: the USB stack resets twice before
		 * considering a port enabled; the second is ignored.
		 * NOTE(review): reset_count is not bumped here, so a
		 * third reset also lands in this branch unless the
		 * counter is advanced elsewhere -- confirm. */
		d_printf(2, wusbhc->dev, "Ignoring second reset on port_idx "
			 "%u\n", port_idx);
	else
		result = wusbhc_dev_reset(wusbhc, port_idx);
	d_fnend(3, wusbhc->dev, "(wusbhc %p port_idx %u) = %d\n",
		wusbhc, port_idx, result);
	return result;
}
116
117/*
118 * Return the hub change status bitmap
119 *
120 * The bits in the change status bitmap are cleared when a
121 * ClearPortFeature request is issued (USB2.0[11.12.3,11.12.4].
122 *
123 * @wusbhc is assumed referenced and @wusbhc->mutex unlocked.
124 *
125 * WARNING!! This gets called from atomic context; we cannot get the
126 * mutex--the only race condition we can find is some bit
127 * changing just after we copy it, which shouldn't be too
128 * big of a problem [and we can't make it an spinlock
129 * because other parts need to take it and sleep] .
130 *
 * @usb_hcd is refcounted, so it won't disappear under us
132 * and before killing a host, the polling of the root hub
133 * would be stopped anyway.
134 */
int wusbhc_rh_status_data(struct usb_hcd *usb_hcd, char *_buf)
{
	struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
	size_t cnt, size;
	unsigned long *buf = (unsigned long *) _buf;

	d_fnstart(1, wusbhc->dev, "(wusbhc %p)\n", wusbhc);
	/* WE DON'T LOCK, see comment */
	/* Bit 0 is the hub's own change bit; port N uses bit N */
	size = wusbhc->ports_max + 1 /* hub bit */;
	size = (size + 8 - 1) / 8;	/* round to bytes */
	/* NOTE(review): set_bit/clear_bit act on unsigned-long words;
	 * this assumes _buf is sized and aligned for that -- confirm
	 * against the HCD core's status buffer. */
	for (cnt = 0; cnt < wusbhc->ports_max; cnt++)
		if (wusb_port_by_idx(wusbhc, cnt)->change)
			set_bit(cnt + 1, buf);
		else
			clear_bit(cnt + 1, buf);
	d_fnend(1, wusbhc->dev, "(wusbhc %p) %u, buffer:\n", wusbhc, (int)size);
	d_dump(1, wusbhc->dev, _buf, size);
	/* NOTE(review): returns the bitmap size even when no change bit
	 * is set; hub_status_data conventionally returns 0 when there
	 * is nothing to report -- confirm this is intended. */
	return size;
}
EXPORT_SYMBOL_GPL(wusbhc_rh_status_data);
155
156/*
 * Return the hub's descriptor
158 *
159 * NOTE: almost cut and paste from ehci-hub.c
160 *
161 * @wusbhc is assumed referenced and @wusbhc->mutex unlocked
162 */
163static int wusbhc_rh_get_hub_descr(struct wusbhc *wusbhc, u16 wValue,
164 u16 wIndex,
165 struct usb_hub_descriptor *descr,
166 u16 wLength)
167{
168 u16 temp = 1 + (wusbhc->ports_max / 8);
169 u8 length = 7 + 2 * temp;
170
171 if (wLength < length)
172 return -ENOSPC;
173 descr->bDescLength = 7 + 2 * temp;
174 descr->bDescriptorType = 0x29; /* HUB type */
175 descr->bNbrPorts = wusbhc->ports_max;
176 descr->wHubCharacteristics = cpu_to_le16(
177 0x00 /* All ports power at once */
178 | 0x00 /* not part of compound device */
179 | 0x10 /* No overcurrent protection */
180 | 0x00 /* 8 FS think time FIXME ?? */
181 | 0x00); /* No port indicators */
182 descr->bPwrOn2PwrGood = 0;
183 descr->bHubContrCurrent = 0;
184 /* two bitmaps: ports removable, and usb 1.0 legacy PortPwrCtrlMask */
185 memset(&descr->bitmap[0], 0, temp);
186 memset(&descr->bitmap[temp], 0xff, temp);
187 return 0;
188}
189
190/*
191 * Clear a hub feature
192 *
193 * @wusbhc is assumed referenced and @wusbhc->mutex unlocked.
194 *
195 * Nothing to do, so no locking needed ;)
196 */
static int wusbhc_rh_clear_hub_feat(struct wusbhc *wusbhc, u16 feature)
{
	int result;
	struct device *dev = wusbhc->dev;

	d_fnstart(4, dev, "(%p, feature 0x%04u)\n", wusbhc, feature);
	switch (feature) {
	case C_HUB_LOCAL_POWER:
		/* FIXME: maybe plug bit 0 to the power input status,
		 * if any?
		 * see wusbhc_rh_get_hub_status() */
		/* fallthrough: both features are accepted as no-ops */
	case C_HUB_OVER_CURRENT:
		result = 0;
		break;
	default:
		result = -EPIPE;	/* unknown feature: stall */
	}
	d_fnend(4, dev, "(%p, feature 0x%04u), %d\n", wusbhc, feature, result);
	return result;
}
217
/*
 * Return hub status (it is always zero...)
 *
 * @wusbhc is assumed referenced and @wusbhc->mutex unlocked.
 *
 * Nothing to do, so no locking needed ;)
 *
 * NOTE: @wLength is ignored; only the fixed 32-bit status/change word
 * is written.
 */
static int wusbhc_rh_get_hub_status(struct wusbhc *wusbhc, u32 *buf,
				    u16 wLength)
{
	/* FIXME: maybe plug bit 0 to the power input status (if any)? */
	*buf = 0;
	return 0;
}
232
233/*
234 * Set a port feature
235 *
236 * @wusbhc is assumed referenced and @wusbhc->mutex unlocked.
237 */
238static int wusbhc_rh_set_port_feat(struct wusbhc *wusbhc, u16 feature,
239 u8 selector, u8 port_idx)
240{
241 int result = -EINVAL;
242 struct device *dev = wusbhc->dev;
243
244 d_fnstart(4, dev, "(feat 0x%04u, selector 0x%u, port_idx %d)\n",
245 feature, selector, port_idx);
246
247 if (port_idx > wusbhc->ports_max)
248 goto error;
249
250 switch (feature) {
251 /* According to USB2.0[11.24.2.13]p2, these features
252 * are not required to be implemented. */
253 case USB_PORT_FEAT_C_OVER_CURRENT:
254 case USB_PORT_FEAT_C_ENABLE:
255 case USB_PORT_FEAT_C_SUSPEND:
256 case USB_PORT_FEAT_C_CONNECTION:
257 case USB_PORT_FEAT_C_RESET:
258 result = 0;
259 break;
260
261 case USB_PORT_FEAT_POWER:
262 /* No such thing, but we fake it works */
263 mutex_lock(&wusbhc->mutex);
264 wusb_port_by_idx(wusbhc, port_idx)->status |= USB_PORT_STAT_POWER;
265 mutex_unlock(&wusbhc->mutex);
266 result = 0;
267 break;
268 case USB_PORT_FEAT_RESET:
269 result = wusbhc_rh_port_reset(wusbhc, port_idx);
270 break;
271 case USB_PORT_FEAT_ENABLE:
272 case USB_PORT_FEAT_SUSPEND:
273 dev_err(dev, "(port_idx %d) set feat %d/%d UNIMPLEMENTED\n",
274 port_idx, feature, selector);
275 result = -ENOSYS;
276 break;
277 default:
278 dev_err(dev, "(port_idx %d) set feat %d/%d UNKNOWN\n",
279 port_idx, feature, selector);
280 result = -EPIPE;
281 break;
282 }
283error:
284 d_fnend(4, dev, "(feat 0x%04u, selector 0x%u, port_idx %d) = %d\n",
285 feature, selector, port_idx, result);
286 return result;
287}
288
289/*
290 * Clear a port feature...
291 *
292 * @wusbhc is assumed referenced and @wusbhc->mutex unlocked.
293 */
294static int wusbhc_rh_clear_port_feat(struct wusbhc *wusbhc, u16 feature,
295 u8 selector, u8 port_idx)
296{
297 int result = -EINVAL;
298 struct device *dev = wusbhc->dev;
299
300 d_fnstart(4, dev, "(wusbhc %p feat 0x%04x selector %d port_idx %d)\n",
301 wusbhc, feature, selector, port_idx);
302
303 if (port_idx > wusbhc->ports_max)
304 goto error;
305
306 mutex_lock(&wusbhc->mutex);
307 result = 0;
308 switch (feature) {
309 case USB_PORT_FEAT_POWER: /* fake port always on */
310 /* According to USB2.0[11.24.2.7.1.4], no need to implement? */
311 case USB_PORT_FEAT_C_OVER_CURRENT:
312 break;
313 case USB_PORT_FEAT_C_RESET:
314 wusb_port_by_idx(wusbhc, port_idx)->change &= ~USB_PORT_STAT_C_RESET;
315 break;
316 case USB_PORT_FEAT_C_CONNECTION:
317 wusb_port_by_idx(wusbhc, port_idx)->change &= ~USB_PORT_STAT_C_CONNECTION;
318 break;
319 case USB_PORT_FEAT_ENABLE:
320 __wusbhc_dev_disable(wusbhc, port_idx);
321 break;
322 case USB_PORT_FEAT_C_ENABLE:
323 wusb_port_by_idx(wusbhc, port_idx)->change &= ~USB_PORT_STAT_C_ENABLE;
324 break;
325 case USB_PORT_FEAT_SUSPEND:
326 case USB_PORT_FEAT_C_SUSPEND:
327 case 0xffff: /* ??? FIXME */
328 dev_err(dev, "(port_idx %d) Clear feat %d/%d UNIMPLEMENTED\n",
329 port_idx, feature, selector);
330 /* dump_stack(); */
331 result = -ENOSYS;
332 break;
333 default:
334 dev_err(dev, "(port_idx %d) Clear feat %d/%d UNKNOWN\n",
335 port_idx, feature, selector);
336 result = -EPIPE;
337 break;
338 }
339 mutex_unlock(&wusbhc->mutex);
340error:
341 d_fnend(4, dev, "(wusbhc %p feat 0x%04x selector %d port_idx %d) = "
342 "%d\n", wusbhc, feature, selector, port_idx, result);
343 return result;
344}
345
346/*
347 * Return the port's status
348 *
349 * @wusbhc is assumed referenced and @wusbhc->mutex unlocked.
350 */
351static int wusbhc_rh_get_port_status(struct wusbhc *wusbhc, u16 port_idx,
352 u32 *_buf, u16 wLength)
353{
354 int result = -EINVAL;
355 u16 *buf = (u16 *) _buf;
356
357 d_fnstart(1, wusbhc->dev, "(wusbhc %p port_idx %u wLength %u)\n",
358 wusbhc, port_idx, wLength);
359 if (port_idx > wusbhc->ports_max)
360 goto error;
361 mutex_lock(&wusbhc->mutex);
362 buf[0] = cpu_to_le16(wusb_port_by_idx(wusbhc, port_idx)->status);
363 buf[1] = cpu_to_le16(wusb_port_by_idx(wusbhc, port_idx)->change);
364 result = 0;
365 mutex_unlock(&wusbhc->mutex);
366error:
367 d_fnend(1, wusbhc->dev, "(wusbhc %p) = %d, buffer:\n", wusbhc, result);
368 d_dump(1, wusbhc->dev, _buf, wLength);
369 return result;
370}
371
372/*
373 * Entry point for Root Hub operations
374 *
375 * @wusbhc is assumed referenced and @wusbhc->mutex unlocked.
376 */
377int wusbhc_rh_control(struct usb_hcd *usb_hcd, u16 reqntype, u16 wValue,
378 u16 wIndex, char *buf, u16 wLength)
379{
380 int result = -ENOSYS;
381 struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
382
383 switch (reqntype) {
384 case GetHubDescriptor:
385 result = wusbhc_rh_get_hub_descr(
386 wusbhc, wValue, wIndex,
387 (struct usb_hub_descriptor *) buf, wLength);
388 break;
389 case ClearHubFeature:
390 result = wusbhc_rh_clear_hub_feat(wusbhc, wValue);
391 break;
392 case GetHubStatus:
393 result = wusbhc_rh_get_hub_status(wusbhc, (u32 *)buf, wLength);
394 break;
395
396 case SetPortFeature:
397 result = wusbhc_rh_set_port_feat(wusbhc, wValue, wIndex >> 8,
398 (wIndex & 0xff) - 1);
399 break;
400 case ClearPortFeature:
401 result = wusbhc_rh_clear_port_feat(wusbhc, wValue, wIndex >> 8,
402 (wIndex & 0xff) - 1);
403 break;
404 case GetPortStatus:
405 result = wusbhc_rh_get_port_status(wusbhc, wIndex - 1,
406 (u32 *)buf, wLength);
407 break;
408
409 case SetHubFeature:
410 default:
411 dev_err(wusbhc->dev, "%s (%p [%p], %x, %x, %x, %p, %x) "
412 "UNIMPLEMENTED\n", __func__, usb_hcd, wusbhc, reqntype,
413 wValue, wIndex, buf, wLength);
414 /* dump_stack(); */
415 result = -ENOSYS;
416 }
417 return result;
418}
419EXPORT_SYMBOL_GPL(wusbhc_rh_control);
420
/* Root-hub suspend: not implemented; logs the call and returns -ENOSYS. */
int wusbhc_rh_suspend(struct usb_hcd *usb_hcd)
{
	struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
	dev_err(wusbhc->dev, "%s (%p [%p]) UNIMPLEMENTED\n", __func__,
		usb_hcd, wusbhc);
	/* dump_stack(); */
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(wusbhc_rh_suspend);
430
/* Root-hub resume: not implemented; logs the call and returns -ENOSYS. */
int wusbhc_rh_resume(struct usb_hcd *usb_hcd)
{
	struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
	dev_err(wusbhc->dev, "%s (%p [%p]) UNIMPLEMENTED\n", __func__,
		usb_hcd, wusbhc);
	/* dump_stack(); */
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(wusbhc_rh_resume);
440
/* Start of a port reset from usbcore: not implemented; loudly warns
 * (WARN_ON) since reaching here indicates a missing code path. */
int wusbhc_rh_start_port_reset(struct usb_hcd *usb_hcd, unsigned port_idx)
{
	struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
	dev_err(wusbhc->dev, "%s (%p [%p], port_idx %u) UNIMPLEMENTED\n",
		__func__, usb_hcd, wusbhc, port_idx);
	WARN_ON(1);
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(wusbhc_rh_start_port_reset);
450
/* Initialize a fake root-hub port: WUSB devices are always presented
 * to usbcore as high-speed. */
static void wusb_port_init(struct wusb_port *port)
{
	port->status |= USB_PORT_STAT_HIGH_SPEED;
}
455
456/*
457 * Alloc fake port specific fields and status.
458 */
459int wusbhc_rh_create(struct wusbhc *wusbhc)
460{
461 int result = -ENOMEM;
462 size_t port_size, itr;
463 port_size = wusbhc->ports_max * sizeof(wusbhc->port[0]);
464 wusbhc->port = kzalloc(port_size, GFP_KERNEL);
465 if (wusbhc->port == NULL)
466 goto error_port_alloc;
467 for (itr = 0; itr < wusbhc->ports_max; itr++)
468 wusb_port_init(&wusbhc->port[itr]);
469 result = 0;
470error_port_alloc:
471 return result;
472}
473
/* Release the fake port array allocated by wusbhc_rh_create(). */
void wusbhc_rh_destroy(struct wusbhc *wusbhc)
{
	kfree(wusbhc->port);
}
diff --git a/drivers/usb/wusbcore/security.c b/drivers/usb/wusbcore/security.c
new file mode 100644
index 000000000000..a101cad6a8d4
--- /dev/null
+++ b/drivers/usb/wusbcore/security.c
@@ -0,0 +1,642 @@
1/*
2 * Wireless USB Host Controller
3 * Security support: encryption enablement, etc
4 *
5 * Copyright (C) 2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * FIXME: docs
24 */
25#include <linux/types.h>
26#include <linux/usb/ch9.h>
27#include <linux/random.h>
28#include "wusbhc.h"
29
30/*
31 * DEBUG & SECURITY WARNING!!!!
32 *
33 * If you enable this past 1, the debug code will weaken the
34 * cryptographic safety of the system (on purpose, for debugging).
35 *
36 * Weaken means:
37 * we print secret keys and intermediate values all the way,
38 */
39#undef D_LOCAL
40#define D_LOCAL 2
41#include <linux/uwb/debug.h>
42
43static void wusbhc_set_gtk_callback(struct urb *urb);
44static void wusbhc_gtk_rekey_done_work(struct work_struct *work);
45
/*
 * Initialize the security state of a host controller.
 *
 * Fills in the fixed parts of the GTK key descriptor (length, type),
 * computes the host-originated GTK key index and prepares the rekey
 * completion work item.  Always returns 0.
 */
int wusbhc_sec_create(struct wusbhc *wusbhc)
{
	wusbhc->gtk.descr.bLength = sizeof(wusbhc->gtk.descr) + sizeof(wusbhc->gtk.data);
	wusbhc->gtk.descr.bDescriptorType = USB_DT_KEY;
	wusbhc->gtk.descr.bReserved = 0;

	wusbhc->gtk_index = wusb_key_index(0, WUSB_KEY_INDEX_TYPE_GTK,
					   WUSB_KEY_INDEX_ORIGINATOR_HOST);

	INIT_WORK(&wusbhc->gtk_rekey_done_work, wusbhc_gtk_rekey_done_work);

	return 0;
}
59
60
/* Called when the HC is destroyed -- nothing to release at present;
 * counterpart of wusbhc_sec_create(). */
void wusbhc_sec_destroy(struct wusbhc *wusbhc)
{
}
65
66
67/**
68 * wusbhc_next_tkid - generate a new, currently unused, TKID
69 * @wusbhc: the WUSB host controller
70 * @wusb_dev: the device whose PTK the TKID is for
71 * (or NULL for a TKID for a GTK)
72 *
73 * The generated TKID consist of two parts: the device's authenicated
74 * address (or 0 or a GTK); and an incrementing number. This ensures
75 * that TKIDs cannot be shared between devices and by the time the
76 * incrementing number wraps around the older TKIDs will no longer be
77 * in use (a maximum of two keys may be active at any one time).
78 */
79static u32 wusbhc_next_tkid(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
80{
81 u32 *tkid;
82 u32 addr;
83
84 if (wusb_dev == NULL) {
85 tkid = &wusbhc->gtk_tkid;
86 addr = 0;
87 } else {
88 tkid = &wusb_port_by_idx(wusbhc, wusb_dev->port_idx)->ptk_tkid;
89 addr = wusb_dev->addr & 0x7f;
90 }
91
92 *tkid = (addr << 8) | ((*tkid + 1) & 0xff);
93
94 return *tkid;
95}
96
97static void wusbhc_generate_gtk(struct wusbhc *wusbhc)
98{
99 const size_t key_size = sizeof(wusbhc->gtk.data);
100 u32 tkid;
101
102 tkid = wusbhc_next_tkid(wusbhc, NULL);
103
104 wusbhc->gtk.descr.tTKID[0] = (tkid >> 0) & 0xff;
105 wusbhc->gtk.descr.tTKID[1] = (tkid >> 8) & 0xff;
106 wusbhc->gtk.descr.tTKID[2] = (tkid >> 16) & 0xff;
107
108 get_random_bytes(wusbhc->gtk.descr.bKeyData, key_size);
109}
110
111/**
112 * wusbhc_sec_start - start the security management process
113 * @wusbhc: the WUSB host controller
114 *
115 * Generate and set an initial GTK on the host controller.
116 *
117 * Called when the HC is started.
118 */
119int wusbhc_sec_start(struct wusbhc *wusbhc)
120{
121 const size_t key_size = sizeof(wusbhc->gtk.data);
122 int result;
123
124 wusbhc_generate_gtk(wusbhc);
125
126 result = wusbhc->set_gtk(wusbhc, wusbhc->gtk_tkid,
127 &wusbhc->gtk.descr.bKeyData, key_size);
128 if (result < 0)
129 dev_err(wusbhc->dev, "cannot set GTK for the host: %d\n",
130 result);
131
132 return result;
133}
134
/**
 * wusbhc_sec_stop - stop the security management process
 * @wusbhc: the WUSB host controller
 *
 * Wait for any pending GTK rekeys to stop.  Called when the HC is
 * stopped; counterpart of wusbhc_sec_start().
 */
void wusbhc_sec_stop(struct wusbhc *wusbhc)
{
	cancel_work_sync(&wusbhc->gtk_rekey_done_work);
}
145
146
147/** @returns encryption type name */
148const char *wusb_et_name(u8 x)
149{
150 switch (x) {
151 case USB_ENC_TYPE_UNSECURE: return "unsecure";
152 case USB_ENC_TYPE_WIRED: return "wired";
153 case USB_ENC_TYPE_CCM_1: return "CCM-1";
154 case USB_ENC_TYPE_RSA_1: return "RSA-1";
155 default: return "unknown";
156 }
157}
158EXPORT_SYMBOL_GPL(wusb_et_name);
159
160/*
161 * Set the device encryption method
162 *
163 * We tell the device which encryption method to use; we do this when
164 * setting up the device's security.
165 */
166static int wusb_dev_set_encryption(struct usb_device *usb_dev, int value)
167{
168 int result;
169 struct device *dev = &usb_dev->dev;
170 struct wusb_dev *wusb_dev = usb_dev->wusb_dev;
171
172 if (value) {
173 value = wusb_dev->ccm1_etd.bEncryptionValue;
174 } else {
175 /* FIXME: should be wusb_dev->etd[UNSECURE].bEncryptionValue */
176 value = 0;
177 }
178 /* Set device's */
179 result = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
180 USB_REQ_SET_ENCRYPTION,
181 USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
182 value, 0, NULL, 0, 1000 /* FIXME: arbitrary */);
183 if (result < 0)
184 dev_err(dev, "Can't set device's WUSB encryption to "
185 "%s (value %d): %d\n",
186 wusb_et_name(wusb_dev->ccm1_etd.bEncryptionType),
187 wusb_dev->ccm1_etd.bEncryptionValue, result);
188 return result;
189}
190
/*
 * Set the GTK to be used by a device.
 *
 * The device must be authenticated.
 *
 * Sends the host's current group key descriptor to the device with a
 * SET_DESCRIPTOR(KEY) request at the host's GTK key index.  Returns
 * usb_control_msg()'s result (bytes transferred or negative errno).
 */
static int wusb_dev_set_gtk(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
{
	struct usb_device *usb_dev = wusb_dev->usb_dev;

	return usb_control_msg(
		usb_dev, usb_sndctrlpipe(usb_dev, 0),
		USB_REQ_SET_DESCRIPTOR,
		USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
		USB_DT_KEY << 8 | wusbhc->gtk_index, 0,
		&wusbhc->gtk.descr, wusbhc->gtk.descr.bLength,
		1000 /* FIXME: arbitrary */);
}
208
209
210/* FIXME: prototype for adding security */
211int wusb_dev_sec_add(struct wusbhc *wusbhc,
212 struct usb_device *usb_dev, struct wusb_dev *wusb_dev)
213{
214 int result, bytes, secd_size;
215 struct device *dev = &usb_dev->dev;
216 struct usb_security_descriptor secd;
217 const struct usb_encryption_descriptor *etd, *ccm1_etd = NULL;
218 void *secd_buf;
219 const void *itr, *top;
220 char buf[64];
221
222 d_fnstart(3, dev, "(usb_dev %p, wusb_dev %p)\n", usb_dev, wusb_dev);
223 result = usb_get_descriptor(usb_dev, USB_DT_SECURITY,
224 0, &secd, sizeof(secd));
225 if (result < sizeof(secd)) {
226 dev_err(dev, "Can't read security descriptor or "
227 "not enough data: %d\n", result);
228 goto error_secd;
229 }
230 secd_size = le16_to_cpu(secd.wTotalLength);
231 d_printf(5, dev, "got %d bytes of sec descriptor, total is %d\n",
232 result, secd_size);
233 secd_buf = kmalloc(secd_size, GFP_KERNEL);
234 if (secd_buf == NULL) {
235 dev_err(dev, "Can't allocate space for security descriptors\n");
236 goto error_secd_alloc;
237 }
238 result = usb_get_descriptor(usb_dev, USB_DT_SECURITY,
239 0, secd_buf, secd_size);
240 if (result < secd_size) {
241 dev_err(dev, "Can't read security descriptor or "
242 "not enough data: %d\n", result);
243 goto error_secd_all;
244 }
245 d_printf(5, dev, "got %d bytes of sec descriptors\n", result);
246 bytes = 0;
247 itr = secd_buf + sizeof(secd);
248 top = secd_buf + result;
249 while (itr < top) {
250 etd = itr;
251 if (top - itr < sizeof(*etd)) {
252 dev_err(dev, "BUG: bad device security descriptor; "
253 "not enough data (%zu vs %zu bytes left)\n",
254 top - itr, sizeof(*etd));
255 break;
256 }
257 if (etd->bLength < sizeof(*etd)) {
258 dev_err(dev, "BUG: bad device encryption descriptor; "
259 "descriptor is too short "
260 "(%u vs %zu needed)\n",
261 etd->bLength, sizeof(*etd));
262 break;
263 }
264 itr += etd->bLength;
265 bytes += snprintf(buf + bytes, sizeof(buf) - bytes,
266 "%s (0x%02x/%02x) ",
267 wusb_et_name(etd->bEncryptionType),
268 etd->bEncryptionValue, etd->bAuthKeyIndex);
269 if (etd->bEncryptionType == USB_ENC_TYPE_CCM_1)
270 ccm1_etd = etd;
271 }
272 /* This code only supports CCM1 as of now. */
273 /* FIXME: user has to choose which sec mode to use?
274 * In theory we want CCM */
275 if (ccm1_etd == NULL) {
276 dev_err(dev, "WUSB device doesn't support CCM1 encryption, "
277 "can't use!\n");
278 result = -EINVAL;
279 goto error_no_ccm1;
280 }
281 wusb_dev->ccm1_etd = *ccm1_etd;
282 dev_info(dev, "supported encryption: %s; using %s (0x%02x/%02x)\n",
283 buf, wusb_et_name(ccm1_etd->bEncryptionType),
284 ccm1_etd->bEncryptionValue, ccm1_etd->bAuthKeyIndex);
285 result = 0;
286 kfree(secd_buf);
287out:
288 d_fnend(3, dev, "(usb_dev %p, wusb_dev %p) = %d\n",
289 usb_dev, wusb_dev, result);
290 return result;
291
292
293error_no_ccm1:
294error_secd_all:
295 kfree(secd_buf);
296error_secd_alloc:
297error_secd:
298 goto out;
299}
300
/* Release per-device security state; counterpart of wusb_dev_sec_add(). */
void wusb_dev_sec_rm(struct wusb_dev *wusb_dev)
{
	/* Nothing so far */
}
305
/* Debug-dump a handshake message (note the TKID bytes are printed
 * most-significant first: [2] [1] [0]). */
static void hs_printk(unsigned level, struct device *dev,
		      struct usb_handshake *hs)
{
	d_printf(level, dev,
		 "  bMessageNumber: %u\n"
		 "  bStatus:        %u\n"
		 "  tTKID:          %02x %02x %02x\n"
		 "  CDID:           %02x %02x %02x %02x %02x %02x %02x %02x\n"
		 "                  %02x %02x %02x %02x %02x %02x %02x %02x\n"
		 "  nonce:          %02x %02x %02x %02x %02x %02x %02x %02x\n"
		 "                  %02x %02x %02x %02x %02x %02x %02x %02x\n"
		 "  MIC:            %02x %02x %02x %02x %02x %02x %02x %02x\n",
		 hs->bMessageNumber, hs->bStatus,
		 hs->tTKID[2], hs->tTKID[1], hs->tTKID[0],
		 hs->CDID[0], hs->CDID[1], hs->CDID[2], hs->CDID[3],
		 hs->CDID[4], hs->CDID[5], hs->CDID[6], hs->CDID[7],
		 hs->CDID[8], hs->CDID[9], hs->CDID[10], hs->CDID[11],
		 hs->CDID[12], hs->CDID[13], hs->CDID[14], hs->CDID[15],
		 hs->nonce[0], hs->nonce[1], hs->nonce[2], hs->nonce[3],
		 hs->nonce[4], hs->nonce[5], hs->nonce[6], hs->nonce[7],
		 hs->nonce[8], hs->nonce[9], hs->nonce[10], hs->nonce[11],
		 hs->nonce[12], hs->nonce[13], hs->nonce[14], hs->nonce[15],
		 hs->MIC[0], hs->MIC[1], hs->MIC[2], hs->MIC[3],
		 hs->MIC[4], hs->MIC[5], hs->MIC[6], hs->MIC[7]);
}
331
332/**
333 * Update the address of an unauthenticated WUSB device
334 *
335 * Once we have successfully authenticated, we take it to addr0 state
336 * and then to a normal address.
337 *
338 * Before the device's address (as known by it) was usb_dev->devnum |
339 * 0x80 (unauthenticated address). With this we update it to usb_dev->devnum.
340 */
341static int wusb_dev_update_address(struct wusbhc *wusbhc,
342 struct wusb_dev *wusb_dev)
343{
344 int result = -ENOMEM;
345 struct usb_device *usb_dev = wusb_dev->usb_dev;
346 struct device *dev = &usb_dev->dev;
347 u8 new_address = wusb_dev->addr & 0x7F;
348
349 /* Set address 0 */
350 result = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
351 USB_REQ_SET_ADDRESS, 0,
352 0, 0, NULL, 0, 1000 /* FIXME: arbitrary */);
353 if (result < 0) {
354 dev_err(dev, "auth failed: can't set address 0: %d\n",
355 result);
356 goto error_addr0;
357 }
358 result = wusb_set_dev_addr(wusbhc, wusb_dev, 0);
359 if (result < 0)
360 goto error_addr0;
361 usb_ep0_reinit(usb_dev);
362
363 /* Set new (authenticated) address. */
364 result = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
365 USB_REQ_SET_ADDRESS, 0,
366 new_address, 0, NULL, 0,
367 1000 /* FIXME: arbitrary */);
368 if (result < 0) {
369 dev_err(dev, "auth failed: can't set address %u: %d\n",
370 new_address, result);
371 goto error_addr;
372 }
373 result = wusb_set_dev_addr(wusbhc, wusb_dev, new_address);
374 if (result < 0)
375 goto error_addr;
376 usb_ep0_reinit(usb_dev);
377 usb_dev->authenticated = 1;
378error_addr:
379error_addr0:
380 return result;
381}
382
/*
 * Perform the WUSB 4-way handshake with a device.
 *
 * @wusbhc:   host controller (assumed referenced)
 * @wusb_dev: device being authenticated
 * @ck:       connection key the handshake is based on
 *
 * Sends HS1 (host nonce), reads HS2 back and verifies its message
 * number, status, TKID and CDID echoes and its MIC; derives the
 * KCK/PTK from the CK and both nonces; sends HS3; installs the PTK on
 * the host; distributes the GTK to the device; and finally moves the
 * device from its unauthenticated to its authenticated address.  All
 * intermediate key material is zeroed before returning, and on any
 * failure encryption is turned back off on the device.
 *
 * Returns 0 on success, negative errno on failure.
 */
/* FIXME: split and cleanup */
int wusb_dev_4way_handshake(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev,
			    struct wusb_ckhdid *ck)
{
	int result = -ENOMEM;
	struct usb_device *usb_dev = wusb_dev->usb_dev;
	struct device *dev = &usb_dev->dev;
	u32 tkid;
	__le32 tkid_le;
	struct usb_handshake *hs;
	struct aes_ccm_nonce ccm_n;
	u8 mic[8];
	struct wusb_keydvt_in keydvt_in;
	struct wusb_keydvt_out keydvt_out;

	/* hs[0] = HS1 (out), hs[1] = HS2 (in), hs[2] = HS3 (out) */
	hs = kzalloc(3*sizeof(hs[0]), GFP_KERNEL);
	if (hs == NULL) {
		dev_err(dev, "can't allocate handshake data\n");
		goto error_kzalloc;
	}

	/* We need to turn encryption before beginning the 4way
	 * hshake (WUSB1.0[.3.2.2]) */
	result = wusb_dev_set_encryption(usb_dev, 1);
	if (result < 0)
		goto error_dev_set_encryption;

	tkid = wusbhc_next_tkid(wusbhc, wusb_dev);
	tkid_le = cpu_to_le32(tkid);

	/* Build and send HS1: our TKID, the device's CDID and a fresh
	 * host nonce; MIC is zero per the spec. */
	hs[0].bMessageNumber = 1;
	hs[0].bStatus = 0;
	memcpy(hs[0].tTKID, &tkid_le, sizeof(hs[0].tTKID));
	hs[0].bReserved = 0;
	memcpy(hs[0].CDID, &wusb_dev->cdid, sizeof(hs[0].CDID));
	get_random_bytes(&hs[0].nonce, sizeof(hs[0].nonce));
	memset(hs[0].MIC, 0, sizeof(hs[0].MIC));	/* Per WUSB1.0[T7-22] */

	d_printf(1, dev, "I: sending hs1:\n");
	hs_printk(2, dev, &hs[0]);

	result = usb_control_msg(
		usb_dev, usb_sndctrlpipe(usb_dev, 0),
		USB_REQ_SET_HANDSHAKE,
		USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
		1, 0, &hs[0], sizeof(hs[0]), 1000 /* FIXME: arbitrary */);
	if (result < 0) {
		dev_err(dev, "Handshake1: request failed: %d\n", result);
		goto error_hs1;
	}

	/* Handshake 2, from the device -- need to verify fields */
	result = usb_control_msg(
		usb_dev, usb_rcvctrlpipe(usb_dev, 0),
		USB_REQ_GET_HANDSHAKE,
		USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
		2, 0, &hs[1], sizeof(hs[1]), 1000 /* FIXME: arbitrary */);
	if (result < 0) {
		dev_err(dev, "Handshake2: request failed: %d\n", result);
		goto error_hs2;
	}
	d_printf(1, dev, "got HS2:\n");
	hs_printk(2, dev, &hs[1]);

	/* The device must echo our TKID and CDID verbatim. */
	result = -EINVAL;
	if (hs[1].bMessageNumber != 2) {
		dev_err(dev, "Handshake2 failed: bad message number %u\n",
			hs[1].bMessageNumber);
		goto error_hs2;
	}
	if (hs[1].bStatus != 0) {
		dev_err(dev, "Handshake2 failed: bad status %u\n",
			hs[1].bStatus);
		goto error_hs2;
	}
	if (memcmp(hs[0].tTKID, hs[1].tTKID, sizeof(hs[0].tTKID))) {
		dev_err(dev, "Handshake2 failed: TKID mismatch "
			"(#1 0x%02x%02x%02x vs #2 0x%02x%02x%02x)\n",
			hs[0].tTKID[0], hs[0].tTKID[1], hs[0].tTKID[2],
			hs[1].tTKID[0], hs[1].tTKID[1], hs[1].tTKID[2]);
		goto error_hs2;
	}
	if (memcmp(hs[0].CDID, hs[1].CDID, sizeof(hs[0].CDID))) {
		dev_err(dev, "Handshake2 failed: CDID mismatch\n");
		goto error_hs2;
	}

	/* Setup the CCM nonce */
	memset(&ccm_n.sfn, 0, sizeof(ccm_n.sfn)); /* Per WUSB1.0[6.5.2] */
	memcpy(ccm_n.tkid, &tkid_le, sizeof(ccm_n.tkid));
	ccm_n.src_addr = wusbhc->uwb_rc->uwb_dev.dev_addr;
	ccm_n.dest_addr.data[0] = wusb_dev->addr;
	ccm_n.dest_addr.data[1] = 0;

	/* Derive the KCK and PTK from CK, the CCM, H and D nonces */
	memcpy(keydvt_in.hnonce, hs[0].nonce, sizeof(keydvt_in.hnonce));
	memcpy(keydvt_in.dnonce, hs[1].nonce, sizeof(keydvt_in.dnonce));
	result = wusb_key_derive(&keydvt_out, ck->data, &ccm_n, &keydvt_in);
	if (result < 0) {
		dev_err(dev, "Handshake2 failed: cannot derive keys: %d\n",
			result);
		goto error_hs2;
	}
	d_printf(2, dev, "KCK:\n");
	d_dump(2, dev, keydvt_out.kck, sizeof(keydvt_out.kck));
	d_printf(2, dev, "PTK:\n");
	d_dump(2, dev, keydvt_out.ptk, sizeof(keydvt_out.ptk));

	/* Compute MIC and verify it */
	result = wusb_oob_mic(mic, keydvt_out.kck, &ccm_n, &hs[1]);
	if (result < 0) {
		dev_err(dev, "Handshake2 failed: cannot compute MIC: %d\n",
			result);
		goto error_hs2;
	}

	d_printf(2, dev, "MIC:\n");
	d_dump(2, dev, mic, sizeof(mic));
	if (memcmp(hs[1].MIC, mic, sizeof(hs[1].MIC))) {
		dev_err(dev, "Handshake2 failed: MIC mismatch\n");
		goto error_hs2;
	}

	/* Send Handshake3 */
	hs[2].bMessageNumber = 3;
	hs[2].bStatus = 0;
	memcpy(hs[2].tTKID, &tkid_le, sizeof(hs[2].tTKID));
	hs[2].bReserved = 0;
	memcpy(hs[2].CDID, &wusb_dev->cdid, sizeof(hs[2].CDID));
	memcpy(hs[2].nonce, hs[0].nonce, sizeof(hs[2].nonce));
	result = wusb_oob_mic(hs[2].MIC, keydvt_out.kck, &ccm_n, &hs[2]);
	if (result < 0) {
		dev_err(dev, "Handshake3 failed: cannot compute MIC: %d\n",
			result);
		goto error_hs2;
	}

	d_printf(1, dev, "I: sending hs3:\n");
	hs_printk(2, dev, &hs[2]);

	result = usb_control_msg(
		usb_dev, usb_sndctrlpipe(usb_dev, 0),
		USB_REQ_SET_HANDSHAKE,
		USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
		3, 0, &hs[2], sizeof(hs[2]), 1000 /* FIXME: arbitrary */);
	if (result < 0) {
		dev_err(dev, "Handshake3: request failed: %d\n", result);
		goto error_hs3;
	}

	/* Install the PTK on the host side of the link. */
	d_printf(1, dev, "I: turning on encryption on host for device\n");
	d_dump(2, dev, keydvt_out.ptk, sizeof(keydvt_out.ptk));
	result = wusbhc->set_ptk(wusbhc, wusb_dev->port_idx, tkid,
				 keydvt_out.ptk, sizeof(keydvt_out.ptk));
	if (result < 0)
		goto error_wusbhc_set_ptk;

	d_printf(1, dev, "I: setting a GTK\n");
	result = wusb_dev_set_gtk(wusbhc, wusb_dev);
	if (result < 0) {
		dev_err(dev, "Set GTK for device: request failed: %d\n",
			result);
		goto error_wusbhc_set_gtk;
	}

	/* Update the device's address from unauth to auth */
	if (usb_dev->authenticated == 0) {
		d_printf(1, dev, "I: updating addres to auth from non-auth\n");
		result = wusb_dev_update_address(wusbhc, wusb_dev);
		if (result < 0)
			goto error_dev_update_address;
	}
	result = 0;
	d_printf(1, dev, "I: 4way handshke done, device authenticated\n");

	/* Shared exit path: wipe all key material from memory, and on
	 * failure turn encryption on the device back off. */
error_dev_update_address:
error_wusbhc_set_gtk:
error_wusbhc_set_ptk:
error_hs3:
error_hs2:
error_hs1:
	memset(hs, 0, 3*sizeof(hs[0]));
	memset(&keydvt_out, 0, sizeof(keydvt_out));
	memset(&keydvt_in, 0, sizeof(keydvt_in));
	memset(&ccm_n, 0, sizeof(ccm_n));
	memset(mic, 0, sizeof(mic));
	if (result < 0) {
		/* error path */
		wusb_dev_set_encryption(usb_dev, 0);
	}
error_dev_set_encryption:
	kfree(hs);
error_kzalloc:
	return result;
}
582
/*
 * Once all connected and authenticated devices have received the new
 * GTK, switch the host to using it.
 *
 * Runs on the wusbd workqueue; one instance is queued per completed
 * per-device SET_DESCRIPTOR(GTK) URB (see wusbhc_set_gtk_callback()).
 * Only when the last pending device has been counted down does the
 * controller itself switch to the new key.
 */
static void wusbhc_gtk_rekey_done_work(struct work_struct *work)
{
	struct wusbhc *wusbhc = container_of(work, struct wusbhc, gtk_rekey_done_work);
	size_t key_size = sizeof(wusbhc->gtk.data);

	mutex_lock(&wusbhc->mutex);

	/* pending_set_gtks is protected by wusbhc->mutex */
	if (--wusbhc->pending_set_gtks == 0)
		wusbhc->set_gtk(wusbhc, wusbhc->gtk_tkid, &wusbhc->gtk.descr.bKeyData, key_size);

	mutex_unlock(&wusbhc->mutex);
}
599
/* Completion handler for the per-device set-GTK URB; runs in atomic
 * context, so defer the accounting to the wusbd workqueue. */
static void wusbhc_set_gtk_callback(struct urb *urb)
{
	struct wusbhc *wusbhc = urb->context;

	queue_work(wusbd, &wusbhc->gtk_rekey_done_work);
}
606
607/**
608 * wusbhc_gtk_rekey - generate and distribute a new GTK
609 * @wusbhc: the WUSB host controller
610 *
611 * Generate a new GTK and distribute it to all connected and
612 * authenticated devices. When all devices have the new GTK, the host
613 * starts using it.
614 *
615 * This must be called after every device disconnect (see [WUSB]
616 * section 6.2.11.2).
617 */
618void wusbhc_gtk_rekey(struct wusbhc *wusbhc)
619{
620 static const size_t key_size = sizeof(wusbhc->gtk.data);
621 int p;
622
623 wusbhc_generate_gtk(wusbhc);
624
625 for (p = 0; p < wusbhc->ports_max; p++) {
626 struct wusb_dev *wusb_dev;
627
628 wusb_dev = wusbhc->port[p].wusb_dev;
629 if (!wusb_dev || !wusb_dev->usb_dev | !wusb_dev->usb_dev->authenticated)
630 continue;
631
632 usb_fill_control_urb(wusb_dev->set_gtk_urb, wusb_dev->usb_dev,
633 usb_sndctrlpipe(wusb_dev->usb_dev, 0),
634 (void *)wusb_dev->set_gtk_req,
635 &wusbhc->gtk.descr, wusbhc->gtk.descr.bLength,
636 wusbhc_set_gtk_callback, wusbhc);
637 if (usb_submit_urb(wusb_dev->set_gtk_urb, GFP_KERNEL) == 0)
638 wusbhc->pending_set_gtks++;
639 }
640 if (wusbhc->pending_set_gtks == 0)
641 wusbhc->set_gtk(wusbhc, wusbhc->gtk_tkid, &wusbhc->gtk.descr.bKeyData, key_size);
642}
diff --git a/drivers/usb/wusbcore/wa-hc.c b/drivers/usb/wusbcore/wa-hc.c
new file mode 100644
index 000000000000..9d04722415bb
--- /dev/null
+++ b/drivers/usb/wusbcore/wa-hc.c
@@ -0,0 +1,95 @@
1/*
2 * Wire Adapter Host Controller Driver
3 * Common items to HWA and DWA based HCDs
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * FIXME: docs
24 */
25#include "wusbhc.h"
26#include "wa-hc.h"
27
28/**
29 * Assumes
30 *
31 * wa->usb_dev and wa->usb_iface initialized and refcounted,
32 * wa->wa_descr initialized.
33 */
34int wa_create(struct wahc *wa, struct usb_interface *iface)
35{
36 int result;
37 struct device *dev = &iface->dev;
38
39 result = wa_rpipes_create(wa);
40 if (result < 0)
41 goto error_rpipes_create;
42 /* Fill up Data Transfer EP pointers */
43 wa->dti_epd = &iface->cur_altsetting->endpoint[1].desc;
44 wa->dto_epd = &iface->cur_altsetting->endpoint[2].desc;
45 wa->xfer_result_size = le16_to_cpu(wa->dti_epd->wMaxPacketSize);
46 wa->xfer_result = kmalloc(wa->xfer_result_size, GFP_KERNEL);
47 if (wa->xfer_result == NULL)
48 goto error_xfer_result_alloc;
49 result = wa_nep_create(wa, iface);
50 if (result < 0) {
51 dev_err(dev, "WA-CDS: can't initialize notif endpoint: %d\n",
52 result);
53 goto error_nep_create;
54 }
55 return 0;
56
57error_nep_create:
58 kfree(wa->xfer_result);
59error_xfer_result_alloc:
60 wa_rpipes_destroy(wa);
61error_rpipes_create:
62 return result;
63}
64EXPORT_SYMBOL_GPL(wa_create);
65
66
/*
 * Tear down a wire adapter's resources; reverses wa_create().
 *
 * The DTI/buf-in URBs are only killed/released if the DTI URB was
 * ever allocated (both are set up together elsewhere).
 */
void __wa_destroy(struct wahc *wa)
{
	if (wa->dti_urb) {
		usb_kill_urb(wa->dti_urb);
		usb_put_urb(wa->dti_urb);
		usb_kill_urb(wa->buf_in_urb);
		usb_put_urb(wa->buf_in_urb);
	}
	kfree(wa->xfer_result);
	wa_nep_destroy(wa);
	wa_rpipes_destroy(wa);
}
EXPORT_SYMBOL_GPL(__wa_destroy);
80
/**
 * wa_reset_all - reset the WA device
 * @wa: the WA to be reset
 *
 * For HWAs the radio controller and all other PALs are also reset.
 *
 * NOTE(review): currently delegates straight to the WUSB HC full
 * reset; DWA handling is still missing (see FIXME below).
 */
void wa_reset_all(struct wahc *wa)
{
	/* FIXME: assuming HWA. */
	wusbhc_reset_all(wa->wusb);
}
93MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
94MODULE_DESCRIPTION("Wireless USB Wire Adapter core");
95MODULE_LICENSE("GPL");
diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
new file mode 100644
index 000000000000..586d350cdb4d
--- /dev/null
+++ b/drivers/usb/wusbcore/wa-hc.h
@@ -0,0 +1,417 @@
1/*
2 * HWA Host Controller Driver
3 * Wire Adapter Control/Data Streaming Iface (WUSB1.0[8])
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * This driver implements a USB Host Controller (struct usb_hcd) for a
24 * Wireless USB Host Controller based on the Wireless USB 1.0
25 * Host-Wire-Adapter specification (in layman terms, a USB-dongle that
26 * implements a Wireless USB host).
27 *
28 * Check out the Design-overview.txt file in the source documentation
29 * for other details on the implementation.
30 *
31 * Main blocks:
32 *
33 * driver glue with the driver API, workqueue daemon
34 *
35 * lc RC instance life cycle management (create, destroy...)
36 *
37 * hcd glue with the USB API Host Controller Interface API.
38 *
 * nep Notification EndPoint management: collect notifications
 * and queue them with the workqueue daemon.
41 *
42 * Handle notifications as coming from the NEP. Sends them
43 * off others to their respective modules (eg: connect,
44 * disconnect and reset go to devconnect).
45 *
46 * rpipe Remote Pipe management; rpipe is what we use to write
47 * to an endpoint on a WUSB device that is connected to a
48 * HWA RC.
49 *
 * xfer Transfer management -- this is all the code that gets a
 * buffer and pushes it to a device (or vice versa).
52 *
53 * Some day a lot of this code will be shared between this driver and
54 * the drivers for DWA (xfer, rpipe).
55 *
56 * All starts at driver.c:hwahc_probe(), when one of this guys is
57 * connected. hwahc_disconnect() stops it.
58 *
59 * During operation, the main driver is devices connecting or
60 * disconnecting. They cause the HWA RC to send notifications into
61 * nep.c:hwahc_nep_cb() that will dispatch them to
62 * notif.c:wa_notif_dispatch(). From there they will fan to cause
63 * device connects, disconnects, etc.
64 *
65 * Note much of the activity is difficult to follow. For example a
66 * device connect goes to devconnect, which will cause the "fake" root
67 * hub port to show a connect and stop there. Then khubd will notice
68 * and call into the rh.c:hwahc_rc_port_reset() code to authenticate
69 * the device (and this might require user intervention) and enable
70 * the port.
71 *
72 * We also have a timer workqueue going from devconnect.c that
73 * schedules in hwahc_devconnect_create().
74 *
75 * The rest of the traffic is in the usual entry points of a USB HCD,
76 * which are hooked up in driver.c:hwahc_rc_driver, and defined in
77 * hcd.c.
78 */
79
80#ifndef __HWAHC_INTERNAL_H__
81#define __HWAHC_INTERNAL_H__
82
83#include <linux/completion.h>
84#include <linux/usb.h>
85#include <linux/mutex.h>
86#include <linux/spinlock.h>
87#include <linux/uwb.h>
88#include <linux/usb/wusb.h>
89#include <linux/usb/wusb-wa.h>
90
91struct wusbhc;
92struct wahc;
93extern void wa_urb_enqueue_run(struct work_struct *ws);
94
95/**
96 * RPipe instance
97 *
98 * @descr's fields are kept in LE, as we need to send it back and
99 * forth.
100 *
101 * @wa is referenced when set
102 *
103 * @segs_available is the number of requests segments that still can
104 * be submitted to the controller without overloading
105 * it. It is initialized to descr->wRequests when
106 * aiming.
107 *
108 * A rpipe supports a max of descr->wRequests at the same time; before
109 * submitting seg_lock has to be taken. If segs_avail > 0, then we can
110 * submit; if not, we have to queue them.
111 */
struct wa_rpipe {
	struct kref refcnt;		/* released via rpipe_destroy() */
	struct usb_rpipe_descriptor descr;	/* kept little-endian; see above */
	struct usb_host_endpoint *ep;	/* if set, ep->hcpriv points back at us */
	struct wahc *wa;		/* referenced while set */
	spinlock_t seg_lock;		/* protects seg_list */
	struct list_head seg_list;	/* segments waiting for a free slot */
	atomic_t segs_available;	/* free request slots on the controller */
	u8 buffer[1];			/* For reads/writes on USB */
};
122
123
124/**
125 * Instance of a HWA Host Controller
126 *
127 * Except where a more specific lock/mutex applies or atomic, all
128 * fields protected by @mutex.
129 *
130 * @wa_descr Can be accessed without locking because it is in
131 * the same area where the device descriptors were
 * read, so it is guaranteed to exist unmodified while
133 * the device exists.
134 *
135 * Endianess has been converted to CPU's.
136 *
137 * @nep_* can be accessed without locking as its processing is
138 * serialized; we submit a NEP URB and it comes to
139 * hwahc_nep_cb(), which won't issue another URB until it is
140 * done processing it.
141 *
142 * @xfer_list:
143 *
144 * List of active transfers to verify existence from a xfer id
145 * gotten from the xfer result message. Can't use urb->list because
146 * it goes by endpoint, and we don't know the endpoint at the time
147 * when we get the xfer result message. We can't really rely on the
148 * pointer (will have to change for 64 bits) as the xfer id is 32 bits.
149 *
150 * @xfer_delayed_list: List of transfers that need to be started
151 * (with a workqueue, because they were
152 * submitted from an atomic context).
153 *
 * FIXME: this needs to be layered up: a wusbhc layer (for sharing
 * commonalities with WHCI), a wa layer (for sharing
 * commonalities with DWA-RC).
157 */
struct wahc {
	struct usb_device *usb_dev;	/* referenced (see wa_create() contract) */
	struct usb_interface *usb_iface;	/* carries the wahc refcount (wa_get()) */

	/* HC to deliver notifications */
	union {
		struct wusbhc *wusb;
		struct dwahc *dwa;
	};

	/* Data Transfer OUT/IN endpoint descriptors, cached by wa_create() */
	const struct usb_endpoint_descriptor *dto_epd, *dti_epd;
	const struct usb_wa_descriptor *wa_descr;

	struct urb *nep_urb;		/* Notification EndPoint [lockless] */
	struct edc nep_edc;
	void *nep_buffer;
	size_t nep_buffer_size;

	atomic_t notifs_queued;		/* throttling; see wa_nep_queue() */

	u16 rpipes;			/* number of rpipes the WA offers */
	unsigned long *rpipe_bm;	/* rpipe usage bitmap */
	spinlock_t rpipe_bm_lock;	/* protect rpipe_bm */
	struct mutex rpipe_mutex;	/* assigning resources to endpoints */

	struct urb *dti_urb;		/* URB for reading xfer results */
	struct urb *buf_in_urb;		/* URB for reading data in */
	struct edc dti_edc;		/* DTI error density counter */
	struct wa_xfer_result *xfer_result; /* real size = dti_ep maxpktsize */
	size_t xfer_result_size;

	s32 status;			/* For reading status */

	struct list_head xfer_list;		/* active xfers; see above */
	struct list_head xfer_delayed_list;	/* xfers to start from workqueue */
	spinlock_t xfer_list_lock;	/* presumably guards both lists -- confirm */
	struct work_struct xfer_work;	/* runs wa_urb_enqueue_run() */
	atomic_t xfer_id_count;		/* source of xfer ids; starts at 1 */
};
197
198
199extern int wa_create(struct wahc *wa, struct usb_interface *iface);
200extern void __wa_destroy(struct wahc *wa);
201void wa_reset_all(struct wahc *wa);
202
203
204/* Miscellaneous constants */
205enum {
206 /** Max number of EPROTO errors we tolerate on the NEP in a
207 * period of time */
208 HWAHC_EPROTO_MAX = 16,
209 /** Period of time for EPROTO errors (in jiffies) */
210 HWAHC_EPROTO_PERIOD = 4 * HZ,
211};
212
213
214/* Notification endpoint handling */
215extern int wa_nep_create(struct wahc *, struct usb_interface *);
216extern void wa_nep_destroy(struct wahc *);
217
218static inline int wa_nep_arm(struct wahc *wa, gfp_t gfp_mask)
219{
220 struct urb *urb = wa->nep_urb;
221 urb->transfer_buffer = wa->nep_buffer;
222 urb->transfer_buffer_length = wa->nep_buffer_size;
223 return usb_submit_urb(urb, gfp_mask);
224}
225
/* Stop notification delivery: synchronously kill the NEP URB. */
static inline void wa_nep_disarm(struct wahc *wa)
{
	usb_kill_urb(wa->nep_urb);
}
230
231
232/* RPipes */
/* Initialize the rpipe bookkeeping locks of @wa (bitmap lock + mutex). */
static inline void wa_rpipe_init(struct wahc *wa)
{
	spin_lock_init(&wa->rpipe_bm_lock);
	mutex_init(&wa->rpipe_mutex);
}
238
/*
 * One-time initialization of a wahc's counters, lists, locks and work
 * items.  xfer_id_count starts at 1 -- presumably so id 0 is never
 * handed out; confirm against the xfer id users.
 */
static inline void wa_init(struct wahc *wa)
{
	edc_init(&wa->nep_edc);
	atomic_set(&wa->notifs_queued, 0);
	wa_rpipe_init(wa);
	edc_init(&wa->dti_edc);
	INIT_LIST_HEAD(&wa->xfer_list);
	INIT_LIST_HEAD(&wa->xfer_delayed_list);
	spin_lock_init(&wa->xfer_list_lock);
	INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
	atomic_set(&wa->xfer_id_count, 1);
}
251
252/**
253 * Destroy a pipe (when refcount drops to zero)
254 *
255 * Assumes it has been moved to the "QUIESCING" state.
256 */
257struct wa_xfer;
258extern void rpipe_destroy(struct kref *_rpipe);
/* Take a reference on @rpipe (paired with rpipe_put()). */
static inline
void __rpipe_get(struct wa_rpipe *rpipe)
{
	kref_get(&rpipe->refcnt);
}
264extern int rpipe_get_by_ep(struct wahc *, struct usb_host_endpoint *,
265 struct urb *, gfp_t);
266static inline void rpipe_put(struct wa_rpipe *rpipe)
267{
268 kref_put(&rpipe->refcnt, rpipe_destroy);
269
270}
271extern void rpipe_ep_disable(struct wahc *, struct usb_host_endpoint *);
272extern int wa_rpipes_create(struct wahc *);
273extern void wa_rpipes_destroy(struct wahc *);
/* Consume one of the rpipe's free request slots (a segment went out). */
static inline void rpipe_avail_dec(struct wa_rpipe *rpipe)
{
	atomic_dec(&rpipe->segs_available);
}
278
/**
 * Release a request slot on the rpipe.
 *
 * Returns true when slots are now available AND there are queued
 * segments on seg_list waiting to use them (i.e. the caller should
 * kick the delayed-submission path).
 */
static inline int rpipe_avail_inc(struct wa_rpipe *rpipe)
{
	return atomic_inc_return(&rpipe->segs_available) > 0
		&& !list_empty(&rpipe->seg_list);
}
287
288
289/* Transferring data */
290extern int wa_urb_enqueue(struct wahc *, struct usb_host_endpoint *,
291 struct urb *, gfp_t);
292extern int wa_urb_dequeue(struct wahc *, struct urb *);
293extern void wa_handle_notif_xfer(struct wahc *, struct wa_notif_hdr *);
294
295
296/* Misc
297 *
298 * FIXME: Refcounting for the actual @hwahc object is not correct; I
299 * mean, this should be refcounting on the HCD underneath, but
300 * it is not. In any case, the semantics for HCD refcounting
301 * are *weird*...on refcount reaching zero it just frees
302 * it...no RC specific function is called...unless I miss
303 * something.
304 *
 * FIXME: has to go away in favour of a 'struct hcd' based solution
306 */
/* Reference a wahc; the count piggybacks on the USB interface's. */
static inline struct wahc *wa_get(struct wahc *wa)
{
	usb_get_intf(wa->usb_iface);
	return wa;
}
312
/* Drop a wahc reference taken with wa_get(). */
static inline void wa_put(struct wahc *wa)
{
	usb_put_intf(wa->usb_iface);
}
317
318
/*
 * Issue a SET_FEATURE (@op != 0) or CLEAR_FEATURE (@op == 0) class
 * request for @feature on the WA's interface.
 *
 * Returns usb_control_msg()'s result: >= 0 on success, negative
 * errno on error.
 */
static inline int __wa_feature(struct wahc *wa, unsigned op, u16 feature)
{
	return usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
			op ? USB_REQ_SET_FEATURE : USB_REQ_CLEAR_FEATURE,
			USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
			feature,
			wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
			NULL, 0, 1000 /* FIXME: arbitrary */);
}
328
329
/* Set @feature on the WA's interface; see __wa_feature(). */
static inline int __wa_set_feature(struct wahc *wa, u16 feature)
{
	return __wa_feature(wa, 1, feature);
}
334
335
/* Clear @feature on the WA's interface; see __wa_feature(). */
static inline int __wa_clear_feature(struct wahc *wa, u16 feature)
{
	return __wa_feature(wa, 0, feature);
}
340
341
342/**
343 * Return the status of a Wire Adapter
344 *
345 * @wa: Wire Adapter instance
346 * @returns < 0 errno code on error, or status bitmap as described
347 * in WUSB1.0[8.3.1.6].
348 *
349 * NOTE: need malloc, some arches don't take USB from the stack
350 */
static inline
s32 __wa_get_status(struct wahc *wa)
{
	s32 result;
	result = usb_control_msg(
		wa->usb_dev, usb_rcvctrlpipe(wa->usb_dev, 0),
		USB_REQ_GET_STATUS,
		USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
		0, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
		/* read into wa->status (heap, not stack; see NOTE above) */
		&wa->status, sizeof(wa->status),
		1000 /* FIXME: arbitrary */);
	if (result >= 0)
		result = wa->status;	/* success: return the status bitmap */
	return result;
}
366
367
368/**
369 * Waits until the Wire Adapter's status matches @mask/@value
370 *
371 * @wa: Wire Adapter instance.
372 * @returns < 0 errno code on error, otherwise status.
373 *
374 * Loop until the WAs status matches the mask and value (status & mask
375 * == value). Timeout if it doesn't happen.
376 *
377 * FIXME: is there an official specification on how long status
378 * changes can take?
379 */
static inline s32 __wa_wait_status(struct wahc *wa, u32 mask, u32 value)
{
	s32 result;
	unsigned loops = 10;	/* ~10 * 50ms = ~500ms budget */
	do {
		msleep(50);
		result = __wa_get_status(wa);
		/* NOTE(review): a negative errno from __wa_get_status()
		 * is bit-tested against @mask before the errno check;
		 * for some mask/value pairs this could spuriously match
		 * -- confirm callers' mask choices make that impossible */
		if ((result & mask) == value)
			break;
		if (loops-- == 0) {
			result = -ETIMEDOUT;
			break;
		}
	} while (result >= 0);	/* bail out on transfer errors */
	return result;
}
396
397
/** Command @hwahc to stop, @returns 0 if ok, < 0 errno code on error */
static inline int __wa_stop(struct wahc *wa)
{
	int result;
	struct device *dev = &wa->usb_iface->dev;

	result = __wa_clear_feature(wa, WA_ENABLE);
	if (result < 0 && result != -ENODEV) {
		dev_err(dev, "error commanding HC to stop: %d\n", result);
		goto out;
	}
	result = __wa_wait_status(wa, WA_ENABLE, 0);
	if (result < 0 && result != -ENODEV)
		dev_err(dev, "error waiting for HC to stop: %d\n", result);
out:
	/* NOTE(review): despite the docstring, errors are only logged
	 * and 'result' is discarded -- this always returns 0.  Confirm
	 * whether callers rely on stop being best-effort before making
	 * this 'return result'. */
	return 0;
}
415
416
417#endif /* #ifndef __HWAHC_INTERNAL_H__ */
diff --git a/drivers/usb/wusbcore/wa-nep.c b/drivers/usb/wusbcore/wa-nep.c
new file mode 100644
index 000000000000..3f542990c73f
--- /dev/null
+++ b/drivers/usb/wusbcore/wa-nep.c
@@ -0,0 +1,310 @@
1/*
2 * WUSB Wire Adapter: Control/Data Streaming Interface (WUSB[8])
3 * Notification EndPoint support
4 *
5 * Copyright (C) 2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * This part takes care of getting the notification from the hw
24 * only and dispatching through wusbwad into
25 * wa_notif_dispatch. Handling is done there.
26 *
27 * WA notifications are limited in size; most of them are three or
28 * four bytes long, and the longest is the HWA Device Notification,
29 * which would not exceed 38 bytes (DNs are limited in payload to 32
30 * bytes plus 3 bytes header (WUSB1.0[7.6p2]), plus 3 bytes HWA
31 * header (WUSB1.0[8.5.4.2]).
32 *
33 * It is not clear if more than one Device Notification can be packed
34 * in a HWA Notification, I assume no because of the wording in
35 * WUSB1.0[8.5.4.2]. In any case, the bigger any notification could
36 * get is 256 bytes (as the bLength field is a byte).
37 *
38 * So what we do is we have this buffer and read into it; when a
39 * notification arrives we schedule work to a specific, single thread
40 * workqueue (so notifications are serialized) and copy the
41 * notification data. After scheduling the work, we rearm the read from
42 * the notification endpoint.
43 *
44 * Entry points here are:
45 *
46 * wa_nep_[create|destroy]() To initialize/release this subsystem
47 *
48 * wa_nep_cb() Callback for the notification
49 * endpoint; when data is ready, this
50 * does the dispatching.
51 */
52#include <linux/workqueue.h>
53#include <linux/ctype.h>
54#include <linux/uwb/debug.h>
55#include "wa-hc.h"
56#include "wusbhc.h"
57
58/* Structure for queueing notifications to the workqueue */
59struct wa_notif_work {
60 struct work_struct work;
61 struct wahc *wa;
62 size_t size;
63 u8 data[];
64};
65
66/*
67 * Process incoming notifications from the WA's Notification EndPoint
68 * [the wuswad daemon, basically]
69 *
70 * @_nw: Pointer to a descriptor which has the pointer to the
71 * @wa, the size of the buffer and the work queue
72 * structure (so we can free all when done).
73 * @returns 0 if ok, < 0 errno code on error.
74 *
75 * All notifications follow the same format; they need to start with a
76 * 'struct wa_notif_hdr' header, so it is easy to parse through
77 * them. We just break the buffer in individual notifications (the
78 * standard doesn't say if it can be done or is forbidden, so we are
79 * cautious) and dispatch each.
80 *
81 * So the handling layers are is:
82 *
83 * WA specific notification (from NEP)
84 * Device Notification Received -> wa_handle_notif_dn()
85 * WUSB Device notification generic handling
86 * BPST Adjustment -> wa_handle_notif_bpst_adj()
87 * ... -> ...
88 *
89 * @wa has to be referenced
90 */
static void wa_notif_dispatch(struct work_struct *ws)
{
	void *itr;			/* cursor into the notification buffer */
	u8 missing = 0;			/* header bytes short, for the warning */
	struct wa_notif_work *nw = container_of(ws, struct wa_notif_work, work);
	struct wahc *wa = nw->wa;	/* referenced by wa_nep_queue() */
	struct wa_notif_hdr *notif_hdr;
	size_t size;

	struct device *dev = &wa->usb_iface->dev;

#if 0
	/* FIXME: need to check for this??? */
	if (usb_hcd->state == HC_STATE_QUIESCING)	/* Going down? */
		goto out;			/* screw it */
#endif
	atomic_dec(&wa->notifs_queued);		/* Throttling ctl */
	/* NOTE(review): redundant -- dev was already set at declaration */
	dev = &wa->usb_iface->dev;
	size = nw->size;
	itr = nw->data;

	/* Walk the buffer, one 'struct wa_notif_hdr'-led record at a time */
	while (size) {
		if (size < sizeof(*notif_hdr)) {
			missing = sizeof(*notif_hdr) - size;
			goto exhausted_buffer;
		}
		notif_hdr = itr;
		if (size < notif_hdr->bLength)
			goto exhausted_buffer;
		/* advance before dispatch so the handlers can't desync us */
		itr += notif_hdr->bLength;
		size -= notif_hdr->bLength;
		/* Dispatch the notification [don't use itr or size!] */
		switch (notif_hdr->bNotifyType) {
		case HWA_NOTIF_DN: {
			struct hwa_notif_dn *hwa_dn;
			hwa_dn = container_of(notif_hdr, struct hwa_notif_dn,
					      hdr);
			wusbhc_handle_dn(wa->wusb, hwa_dn->bSourceDeviceAddr,
					 hwa_dn->dndata,
					 notif_hdr->bLength - sizeof(*hwa_dn));
			break;
		}
		case WA_NOTIF_TRANSFER:
			wa_handle_notif_xfer(wa, notif_hdr);
			break;
		case DWA_NOTIF_RWAKE:
		case DWA_NOTIF_PORTSTATUS:
		case HWA_NOTIF_BPST_ADJ:
			/* FIXME: unimplemented WA NOTIFs */
			/* fallthru */
		default:
			if (printk_ratelimit()) {
				dev_err(dev, "HWA: unknown notification 0x%x, "
					"%zu bytes; discarding\n",
					notif_hdr->bNotifyType,
					(size_t)notif_hdr->bLength);
				dump_bytes(dev, notif_hdr, 16);
			}
			break;
		}
	}
out:
	/* drop the reference and the work item wa_nep_queue() allocated */
	wa_put(wa);
	kfree(nw);
	return;

	/* THIS SHOULD NOT HAPPEN
	 *
	 * Buffer exhausted with partial data remaining; just warn and
	 * discard the data, as this should not happen.
	 */
exhausted_buffer:
	if (!printk_ratelimit())
		goto out;
	dev_warn(dev, "HWA: device sent short notification, "
		 "%d bytes missing; discarding %d bytes.\n",
		 missing, (int)size);
	dump_bytes(dev, itr, size);
	goto out;
}
171
172/*
173 * Deliver incoming WA notifications to the wusbwa workqueue
174 *
175 * @wa: Pointer the Wire Adapter Controller Data Streaming
176 * instance (part of an 'struct usb_hcd').
177 * @size: Size of the received buffer
178 * @returns 0 if ok, < 0 errno code on error.
179 *
180 * The input buffer is @wa->nep_buffer, with @size bytes
181 * (guaranteed to fit in the allocated space,
182 * @wa->nep_buffer_size).
183 */
static int wa_nep_queue(struct wahc *wa, size_t size)
{
	int result = 0;
	struct device *dev = &wa->usb_iface->dev;
	struct wa_notif_work *nw;

	/* dev_fnstart(dev, "(wa %p, size %zu)\n", wa, size); */
	BUG_ON(size > wa->nep_buffer_size);
	if (size == 0)
		goto out;	/* empty notification: nothing to dispatch */
	/* Throttle: drop notifications when the backlog grows too big
	 * (200 is an arbitrary cap; decremented in wa_notif_dispatch()) */
	if (atomic_read(&wa->notifs_queued) > 200) {
		if (printk_ratelimit())
			dev_err(dev, "Too many notifications queued, "
				"throttling back\n");
		goto out;
	}
	/* copy the data out, so the NEP buffer can be rearmed immediately;
	 * GFP_ATOMIC as we run in URB-completion context */
	nw = kzalloc(sizeof(*nw) + size, GFP_ATOMIC);
	if (nw == NULL) {
		if (printk_ratelimit())
			dev_err(dev, "No memory to queue notification\n");
		goto out;	/* best effort: dropped, still returns 0 */
	}
	INIT_WORK(&nw->work, wa_notif_dispatch);
	nw->wa = wa_get(wa);	/* released by wa_notif_dispatch() */
	nw->size = size;
	memcpy(nw->data, wa->nep_buffer, size);
	atomic_inc(&wa->notifs_queued);		/* Throttling ctl */
	queue_work(wusbd, &nw->work);
out:
	/* dev_fnend(dev, "(wa %p, size %zu) = result\n", wa, size, result); */
	return result;
}
216
217/*
218 * Callback for the notification event endpoint
219 *
220 * Check's that everything is fine and then passes the data to be
221 * queued to the workqueue.
222 */
static void wa_nep_cb(struct urb *urb)
{
	int result;
	struct wahc *wa = urb->context;
	struct device *dev = &wa->usb_iface->dev;

	switch (result = urb->status) {
	case 0:
		/* data arrived: hand it off to the workqueue */
		result = wa_nep_queue(wa, urb->actual_length);
		if (result < 0)
			dev_err(dev, "NEP: unable to process notification(s): "
				"%d\n", result);
		break;
	case -ECONNRESET:	/* Not an error, but a controlled situation; */
	case -ENOENT:		/* (we killed the URB)...so, no broadcast */
	case -ESHUTDOWN:
		dev_dbg(dev, "NEP: going down %d\n", urb->status);
		goto out;	/* don't rearm: we are being torn down */
	default:		/* On general errors, we retry unless it gets ugly */
		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)) {
			/* error density exceeded: give up and reset the WA */
			dev_err(dev, "NEP: URB max acceptable errors "
				"exceeded, resetting device\n");
			wa_reset_all(wa);
			goto out;
		}
		dev_err(dev, "NEP: URB error %d\n", urb->status);
	}
	/* rearm for the next notification */
	result = wa_nep_arm(wa, GFP_ATOMIC);
	if (result < 0) {
		dev_err(dev, "NEP: cannot submit URB: %d\n", result);
		wa_reset_all(wa);
	}
out:
	return;
}
259
260/*
261 * Initialize @wa's notification and event's endpoint stuff
262 *
263 * This includes the allocating the read buffer, the context ID
264 * allocation bitmap, the URB and submitting the URB.
265 */
266int wa_nep_create(struct wahc *wa, struct usb_interface *iface)
267{
268 int result;
269 struct usb_endpoint_descriptor *epd;
270 struct usb_device *usb_dev = interface_to_usbdev(iface);
271 struct device *dev = &iface->dev;
272
273 edc_init(&wa->nep_edc);
274 epd = &iface->cur_altsetting->endpoint[0].desc;
275 wa->nep_buffer_size = 1024;
276 wa->nep_buffer = kmalloc(wa->nep_buffer_size, GFP_KERNEL);
277 if (wa->nep_buffer == NULL) {
278 dev_err(dev, "Unable to allocate notification's read buffer\n");
279 goto error_nep_buffer;
280 }
281 wa->nep_urb = usb_alloc_urb(0, GFP_KERNEL);
282 if (wa->nep_urb == NULL) {
283 dev_err(dev, "Unable to allocate notification URB\n");
284 goto error_urb_alloc;
285 }
286 usb_fill_int_urb(wa->nep_urb, usb_dev,
287 usb_rcvintpipe(usb_dev, epd->bEndpointAddress),
288 wa->nep_buffer, wa->nep_buffer_size,
289 wa_nep_cb, wa, epd->bInterval);
290 result = wa_nep_arm(wa, GFP_KERNEL);
291 if (result < 0) {
292 dev_err(dev, "Cannot submit notification URB: %d\n", result);
293 goto error_nep_arm;
294 }
295 return 0;
296
297error_nep_arm:
298 usb_free_urb(wa->nep_urb);
299error_urb_alloc:
300 kfree(wa->nep_buffer);
301error_nep_buffer:
302 return -ENOMEM;
303}
304
/* Undo wa_nep_create(): stop the NEP URB, free it and the read buffer. */
void wa_nep_destroy(struct wahc *wa)
{
	wa_nep_disarm(wa);
	usb_free_urb(wa->nep_urb);
	kfree(wa->nep_buffer);
}
diff --git a/drivers/usb/wusbcore/wa-rpipe.c b/drivers/usb/wusbcore/wa-rpipe.c
new file mode 100644
index 000000000000..f18e4aae66e9
--- /dev/null
+++ b/drivers/usb/wusbcore/wa-rpipe.c
@@ -0,0 +1,562 @@
1/*
2 * WUSB Wire Adapter
3 * rpipe management
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * FIXME: docs
24 *
25 * RPIPE
26 *
 * Targeted at different downstream endpoints
28 *
29 * Descriptor: use to config the remote pipe.
30 *
31 * The number of blocks could be dynamic (wBlocks in descriptor is
32 * 0)--need to schedule them then.
33 *
34 * Each bit in wa->rpipe_bm represents if an rpipe is being used or
35 * not. Rpipes are represented with a 'struct wa_rpipe' that is
36 * attached to the hcpriv member of a 'struct usb_host_endpoint'.
37 *
38 * When you need to xfer data to an endpoint, you get an rpipe for it
39 * with wa_ep_rpipe_get(), which gives you a reference to the rpipe
40 * and keeps a single one (the first one) with the endpoint. When you
41 * are done transferring, you drop that reference. At the end the
42 * rpipe is always allocated and bound to the endpoint. There it might
43 * be recycled when not used.
44 *
45 * Addresses:
46 *
47 * We use a 1:1 mapping mechanism between port address (0 based
48 * index, actually) and the address. The USB stack knows about this.
49 *
50 * USB Stack port number 4 (1 based)
51 * WUSB code port index 3 (0 based)
 * USB Address 5 (2 based -- 0 is for default, 1 for root hub)
53 *
54 * Now, because we don't use the concept as default address exactly
55 * like the (wired) USB code does, we need to kind of skip it. So we
56 * never take addresses from the urb->pipe, but from the
57 * urb->dev->devnum, to make sure that we always have the right
58 * destination address.
59 */
60#include <linux/init.h>
61#include <asm/atomic.h>
62#include <linux/bitmap.h>
63#include "wusbhc.h"
64#include "wa-hc.h"
65
66#define D_LOCAL 0
67#include <linux/uwb/debug.h>
68
69
/*
 * Read the RPIPE descriptor for rpipe @index from the WA into @descr
 * (left in little-endian, as stored on the wire).
 *
 * Returns 0 on success, negative errno on transfer error or short read.
 */
static int __rpipe_get_descr(struct wahc *wa,
			     struct usb_rpipe_descriptor *descr, u16 index)
{
	ssize_t result;
	struct device *dev = &wa->usb_iface->dev;

	/* Get the RPIPE descriptor -- we cannot use the usb_get_descriptor()
	 * function because the arguments are different.
	 */
	d_printf(1, dev, "rpipe %u: get descr\n", index);
	result = usb_control_msg(
		wa->usb_dev, usb_rcvctrlpipe(wa->usb_dev, 0),
		USB_REQ_GET_DESCRIPTOR,
		USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_RPIPE,
		USB_DT_RPIPE<<8, index, descr, sizeof(*descr),
		1000 /* FIXME: arbitrary */);
	if (result < 0) {
		dev_err(dev, "rpipe %u: get descriptor failed: %d\n",
			index, (int)result);
		goto error;
	}
	if (result < sizeof(*descr)) {
		/* short read: reject rather than use a partial descriptor */
		dev_err(dev, "rpipe %u: got short descriptor "
			"(%zd vs %zd bytes needed)\n",
			index, result, sizeof(*descr));
		result = -EINVAL;
		goto error;
	}
	result = 0;

error:
	return result;
}
103
104/*
105 *
106 * The descriptor is assumed to be properly initialized (ie: you got
107 * it through __rpipe_get_descr()).
108 */
/*
 * Write @descr to rpipe @index on the WA.
 *
 * The descriptor is assumed to be properly initialized (ie: you got
 * it through __rpipe_get_descr()) and is still little-endian.
 *
 * Returns 0 on success, negative errno on transfer error or short
 * write.  NOTE(review): timeout here is HZ/10 while the sibling
 * __rpipe_get_descr() uses 1000ms -- presumably unintentional;
 * confirm before unifying.
 */
static int __rpipe_set_descr(struct wahc *wa,
			     struct usb_rpipe_descriptor *descr, u16 index)
{
	ssize_t result;
	struct device *dev = &wa->usb_iface->dev;

	/* we cannot use the usb_get_descriptor() function because the
	 * arguments are different.
	 */
	d_printf(1, dev, "rpipe %u: set descr\n", index);
	result = usb_control_msg(
		wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
		USB_REQ_SET_DESCRIPTOR,
		USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE,
		USB_DT_RPIPE<<8, index, descr, sizeof(*descr),
		HZ / 10);
	if (result < 0) {
		dev_err(dev, "rpipe %u: set descriptor failed: %d\n",
			index, (int)result);
		goto error;
	}
	if (result < sizeof(*descr)) {
		dev_err(dev, "rpipe %u: sent short descriptor "
			"(%zd vs %zd bytes required)\n",
			index, result, sizeof(*descr));
		result = -EINVAL;
		goto error;
	}
	result = 0;

error:
	return result;

}
143
/* Common init for a freshly allocated rpipe: refcount + segment queue. */
static void rpipe_init(struct wa_rpipe *rpipe)
{
	kref_init(&rpipe->refcnt);
	spin_lock_init(&rpipe->seg_lock);
	INIT_LIST_HEAD(&rpipe->seg_list);
}
150
/*
 * Claim the first unused rpipe index at or after @rpipe_idx.
 *
 * The claimed bit is set in wa->rpipe_bm under the bitmap lock.
 * Returns the claimed index, or a value >= wa->rpipes when none is
 * free (nothing is claimed in that case).
 */
static unsigned rpipe_get_idx(struct wahc *wa, unsigned rpipe_idx)
{
	unsigned long flags;

	spin_lock_irqsave(&wa->rpipe_bm_lock, flags);
	rpipe_idx = find_next_zero_bit(wa->rpipe_bm, wa->rpipes, rpipe_idx);
	if (rpipe_idx < wa->rpipes)
		set_bit(rpipe_idx, wa->rpipe_bm);
	spin_unlock_irqrestore(&wa->rpipe_bm_lock, flags);

	return rpipe_idx;
}
163
/* Release rpipe index @rpipe_idx back to the pool (bitmap lock held). */
static void rpipe_put_idx(struct wahc *wa, unsigned rpipe_idx)
{
	unsigned long flags;

	spin_lock_irqsave(&wa->rpipe_bm_lock, flags);
	clear_bit(rpipe_idx, wa->rpipe_bm);
	spin_unlock_irqrestore(&wa->rpipe_bm_lock, flags);
}
172
/*
 * kref release function: unbind the rpipe from its endpoint, return
 * its index to the pool, drop the wahc reference and free it.
 */
void rpipe_destroy(struct kref *_rpipe)
{
	struct wa_rpipe *rpipe = container_of(_rpipe, struct wa_rpipe, refcnt);
	/* NOTE(review): index is u8 but wRPipeIndex is 16-bit; safe only
	 * if a WA never exposes more than 256 rpipes -- confirm */
	u8 index = le16_to_cpu(rpipe->descr.wRPipeIndex);
	d_fnstart(1, NULL, "(rpipe %p %u)\n", rpipe, index);
	if (rpipe->ep)
		rpipe->ep->hcpriv = NULL;
	rpipe_put_idx(rpipe->wa, index);
	wa_put(rpipe->wa);
	kfree(rpipe);
	d_fnend(1, NULL, "(rpipe %p %u)\n", rpipe, index);
}
EXPORT_SYMBOL_GPL(rpipe_destroy);
186
187/*
188 * Locate an idle rpipe, create an structure for it and return it
189 *
190 * @wa is referenced and unlocked
191 * @crs enum rpipe_attr, required endpoint characteristics
192 *
193 * The rpipe can be used only sequentially (not in parallel).
194 *
195 * The rpipe is moved into the "ready" state.
196 */
197static int rpipe_get_idle(struct wa_rpipe **prpipe, struct wahc *wa, u8 crs,
198 gfp_t gfp)
199{
200 int result;
201 unsigned rpipe_idx;
202 struct wa_rpipe *rpipe;
203 struct device *dev = &wa->usb_iface->dev;
204
205 d_fnstart(3, dev, "(wa %p crs 0x%02x)\n", wa, crs);
206 rpipe = kzalloc(sizeof(*rpipe), gfp);
207 if (rpipe == NULL)
208 return -ENOMEM;
209 rpipe_init(rpipe);
210
211 /* Look for an idle pipe */
212 for (rpipe_idx = 0; rpipe_idx < wa->rpipes; rpipe_idx++) {
213 rpipe_idx = rpipe_get_idx(wa, rpipe_idx);
214 if (rpipe_idx >= wa->rpipes) /* no more pipes :( */
215 break;
216 result = __rpipe_get_descr(wa, &rpipe->descr, rpipe_idx);
217 if (result < 0)
218 dev_err(dev, "Can't get descriptor for rpipe %u: %d\n",
219 rpipe_idx, result);
220 else if ((rpipe->descr.bmCharacteristics & crs) != 0)
221 goto found;
222 rpipe_put_idx(wa, rpipe_idx);
223 }
224 *prpipe = NULL;
225 kfree(rpipe);
226 d_fnend(3, dev, "(wa %p crs 0x%02x) = -ENXIO\n", wa, crs);
227 return -ENXIO;
228
229found:
230 set_bit(rpipe_idx, wa->rpipe_bm);
231 rpipe->wa = wa_get(wa);
232 *prpipe = rpipe;
233 d_fnstart(3, dev, "(wa %p crs 0x%02x) = 0\n", wa, crs);
234 return 0;
235}
236
/*
 * Issue an RPIPE_RESET class request for rpipe @index.
 *
 * Returns usb_control_msg()'s result: >= 0 on success, negative
 * errno on error (also logged).
 */
static int __rpipe_reset(struct wahc *wa, unsigned index)
{
	int result;
	struct device *dev = &wa->usb_iface->dev;

	d_printf(1, dev, "rpipe %u: reset\n", index);
	result = usb_control_msg(
		wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
		USB_REQ_RPIPE_RESET,
		USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE,
		0, index, NULL, 0, 1000 /* FIXME: arbitrary */);
	if (result < 0)
		dev_err(dev, "rpipe %u: reset failed: %d\n",
			index, result);
	return result;
}
253
/*
 * Fake companion descriptor for ep0
 *
 * ep0 carries no wireless endpoint-companion descriptor of its own,
 * so rpipe_epc_find() hands out this static one instead.
 *
 * See WUSB1.0[7.4.4], most of this is zero for bulk/int/ctl
 */
static struct usb_wireless_ep_comp_descriptor epc0 = {
	.bLength = sizeof(epc0),
	.bDescriptorType = USB_DT_WIRELESS_ENDPOINT_COMP,
/*	.bMaxBurst = 1, */
	.bMaxSequence = 31,
};
265
266/*
267 * Look for EP companion descriptor
268 *
269 * Get there, look for Inara in the endpoint's extra descriptors
270 */
271static struct usb_wireless_ep_comp_descriptor *rpipe_epc_find(
272 struct device *dev, struct usb_host_endpoint *ep)
273{
274 void *itr;
275 size_t itr_size;
276 struct usb_descriptor_header *hdr;
277 struct usb_wireless_ep_comp_descriptor *epcd;
278
279 d_fnstart(3, dev, "(ep %p)\n", ep);
280 if (ep->desc.bEndpointAddress == 0) {
281 epcd = &epc0;
282 goto out;
283 }
284 itr = ep->extra;
285 itr_size = ep->extralen;
286 epcd = NULL;
287 while (itr_size > 0) {
288 if (itr_size < sizeof(*hdr)) {
289 dev_err(dev, "HW Bug? ep 0x%02x: extra descriptors "
290 "at offset %zu: only %zu bytes left\n",
291 ep->desc.bEndpointAddress,
292 itr - (void *) ep->extra, itr_size);
293 break;
294 }
295 hdr = itr;
296 if (hdr->bDescriptorType == USB_DT_WIRELESS_ENDPOINT_COMP) {
297 epcd = itr;
298 break;
299 }
300 if (hdr->bLength > itr_size) {
301 dev_err(dev, "HW Bug? ep 0x%02x: extra descriptor "
302 "at offset %zu (type 0x%02x) "
303 "length %d but only %zu bytes left\n",
304 ep->desc.bEndpointAddress,
305 itr - (void *) ep->extra, hdr->bDescriptorType,
306 hdr->bLength, itr_size);
307 break;
308 }
309 itr += hdr->bLength;
310 itr_size -= hdr->bDescriptorType;
311 }
312out:
313 d_fnend(3, dev, "(ep %p) = %p\n", ep, epcd);
314 return epcd;
315}
316
/*
 * Aim an rpipe to its device & endpoint destination
 *
 * Resets the rpipe, fills in its descriptor from the endpoint's
 * descriptor plus the wireless companion descriptor and writes it
 * back to the wire adapter.
 *
 * Make sure we change the address to unauthenticated if the device
 * is WUSB and it is not authenticated (bit 7 of the device address
 * flags the unauthenticated address space).
 *
 * Returns 0 on success, negative errno on error (-ENOMSG when no
 * companion descriptor is found).
 */
static int rpipe_aim(struct wa_rpipe *rpipe, struct wahc *wa,
		     struct usb_host_endpoint *ep, struct urb *urb, gfp_t gfp)
{
	int result = -ENOMSG;	/* better code for lack of companion? */
	struct device *dev = &wa->usb_iface->dev;
	struct usb_device *usb_dev = urb->dev;
	struct usb_wireless_ep_comp_descriptor *epcd;
	u8 unauth;

	d_fnstart(3, dev, "(rpipe %p wa %p ep %p, urb %p)\n",
		  rpipe, wa, ep, urb);
	epcd = rpipe_epc_find(dev, ep);
	if (epcd == NULL) {
		dev_err(dev, "ep 0x%02x: can't find companion descriptor\n",
			ep->desc.bEndpointAddress);
		goto error;
	}
	unauth = usb_dev->wusb && !usb_dev->authenticated ? 0x80 : 0;
	__rpipe_reset(wa, le16_to_cpu(rpipe->descr.wRPipeIndex));
	/* all segment slots start out available */
	atomic_set(&rpipe->segs_available, le16_to_cpu(rpipe->descr.wRequests));
	/* FIXME: block allocation system; request with queuing and timeout */
	/* FIXME: compute so seg_size > ep->maxpktsize */
	rpipe->descr.wBlocks = cpu_to_le16(16);		/* given */
	/* ep0 maxpktsize is 0x200 (WUSB1.0[4.8.1]) */
	/* NOTE(review): ep->desc.wMaxPacketSize is already __le16; the
	 * cpu_to_le16() below double-swaps on big-endian — verify
	 * (rpipe_check_aim() makes the same conversion, so the two are
	 * at least consistent with each other). */
	rpipe->descr.wMaxPacketSize = cpu_to_le16(ep->desc.wMaxPacketSize);
	rpipe->descr.bHSHubAddress = 0;			/* reserved: zero */
	rpipe->descr.bHSHubPort = wusb_port_no_to_idx(urb->dev->portnum);
	/* FIXME: use maximum speed as supported or recommended by device */
	rpipe->descr.bSpeed = usb_pipeendpoint(urb->pipe) == 0 ?
		UWB_PHY_RATE_53 : UWB_PHY_RATE_200;
	d_printf(2, dev, "addr %u (0x%02x) rpipe #%u ep# %u speed %d\n",
		 urb->dev->devnum, urb->dev->devnum | unauth,
		 le16_to_cpu(rpipe->descr.wRPipeIndex),
		 usb_pipeendpoint(urb->pipe), rpipe->descr.bSpeed);
	/* see security.c:wusb_update_address() */
	if (unlikely(urb->dev->devnum == 0x80))
		rpipe->descr.bDeviceAddress = 0;
	else
		rpipe->descr.bDeviceAddress = urb->dev->devnum | unauth;
	rpipe->descr.bEndpointAddress = ep->desc.bEndpointAddress;
	/* FIXME: bDataSequence */
	rpipe->descr.bDataSequence = 0;
	/* FIXME: dwCurrentWindow */
	rpipe->descr.dwCurrentWindow = cpu_to_le32(1);
	/* FIXME: bMaxDataSequence */
	rpipe->descr.bMaxDataSequence = epcd->bMaxSequence - 1;
	rpipe->descr.bInterval = ep->desc.bInterval;
	/* FIXME: bOverTheAirInterval */
	rpipe->descr.bOverTheAirInterval = 0;	/* 0 if not isoc */
	/* FIXME: xmit power & preamble blah blah */
	rpipe->descr.bmAttribute = ep->desc.bmAttributes & 0x03;
	/* rpipe->descr.bmCharacteristics RO */
	/* FIXME: bmRetryOptions */
	rpipe->descr.bmRetryOptions = 15;
	/* FIXME: use for assessing link quality? */
	rpipe->descr.wNumTransactionErrors = 0;
	result = __rpipe_set_descr(wa, &rpipe->descr,
				   le16_to_cpu(rpipe->descr.wRPipeIndex));
	if (result < 0) {
		dev_err(dev, "Cannot aim rpipe: %d\n", result);
		goto error;
	}
	result = 0;
error:
	d_fnend(3, dev, "(rpipe %p wa %p ep %p urb %p) = %d\n",
		rpipe, wa, ep, urb, result);
	return result;
}
391
/*
 * Check an aimed rpipe to make sure it points to where we want
 *
 * Used when reusing a cached rpipe: re-derives the expected field
 * values from the endpoint/URB and compares them against the cached
 * descriptor, warning on any mismatch.
 *
 * We use bit 19 of the Linux USB pipe bitmap for unauth vs auth
 * space; when it is like that, we or 0x80 to make an unauth address.
 *
 * Returns 0 if everything matches, -EINVAL (plus WARN_ON splat) on
 * the first-seen discrepancy kind.
 *
 * NOTE(review): d_fnstart() is called but there is no matching
 * d_fnend() on return — confirm whether the debug trace imbalance is
 * intentional.
 */
static int rpipe_check_aim(const struct wa_rpipe *rpipe, const struct wahc *wa,
			   const struct usb_host_endpoint *ep,
			   const struct urb *urb, gfp_t gfp)
{
	int result = 0;	/* better code for lack of companion? */
	struct device *dev = &wa->usb_iface->dev;
	struct usb_device *usb_dev = urb->dev;
	u8 unauth = (usb_dev->wusb && !usb_dev->authenticated) ? 0x80 : 0;
	u8 portnum = wusb_port_no_to_idx(urb->dev->portnum);

	d_fnstart(3, dev, "(rpipe %p wa %p ep %p, urb %p)\n",
		  rpipe, wa, ep, urb);
/* Compare descriptor field 'rdf' against 'val'; 'text' is the printf
 * format fragment used to show the two values on mismatch. */
#define AIM_CHECK(rdf, val, text)					\
	do {								\
		if (rpipe->descr.rdf != (val)) {			\
			dev_err(dev,					\
				"rpipe aim discrepancy: " #rdf " " text "\n", \
				rpipe->descr.rdf, (val));		\
			result = -EINVAL;				\
			WARN_ON(1);					\
		}							\
	} while (0)
	/* NOTE(review): cpu_to_le16() on the already-__le16
	 * ep->desc.wMaxPacketSize mirrors rpipe_aim(); both would need
	 * fixing together on big-endian. */
	AIM_CHECK(wMaxPacketSize, cpu_to_le16(ep->desc.wMaxPacketSize),
		  "(%u vs %u)");
	AIM_CHECK(bHSHubPort, portnum, "(%u vs %u)");
	AIM_CHECK(bSpeed, usb_pipeendpoint(urb->pipe) == 0 ?
			UWB_PHY_RATE_53 : UWB_PHY_RATE_200,
		  "(%u vs %u)");
	AIM_CHECK(bDeviceAddress, urb->dev->devnum | unauth, "(%u vs %u)");
	AIM_CHECK(bEndpointAddress, ep->desc.bEndpointAddress, "(%u vs %u)");
	AIM_CHECK(bInterval, ep->desc.bInterval, "(%u vs %u)");
	AIM_CHECK(bmAttribute, ep->desc.bmAttributes & 0x03, "(%u vs %u)");
#undef AIM_CHECK
	return result;
}
433
/*
 * CONFIG_BUG is defined by Kconfig when BUG()/WARN_ON() support is
 * enabled; default it to 0 here so the `if (CONFIG_BUG == 1)` check
 * in rpipe_get_by_ep() compiles either way.
 */
#ifndef CONFIG_BUG
#define CONFIG_BUG 0
#endif
437
438/*
439 * Make sure there is an rpipe allocated for an endpoint
440 *
441 * If already allocated, we just refcount it; if not, we get an
442 * idle one, aim it to the right location and take it.
443 *
444 * Attaches to ep->hcpriv and rpipe->ep to ep.
445 */
446int rpipe_get_by_ep(struct wahc *wa, struct usb_host_endpoint *ep,
447 struct urb *urb, gfp_t gfp)
448{
449 int result = 0;
450 struct device *dev = &wa->usb_iface->dev;
451 struct wa_rpipe *rpipe;
452 u8 eptype;
453
454 d_fnstart(3, dev, "(wa %p ep %p urb %p gfp 0x%08x)\n", wa, ep, urb,
455 gfp);
456 mutex_lock(&wa->rpipe_mutex);
457 rpipe = ep->hcpriv;
458 if (rpipe != NULL) {
459 if (CONFIG_BUG == 1) {
460 result = rpipe_check_aim(rpipe, wa, ep, urb, gfp);
461 if (result < 0)
462 goto error;
463 }
464 __rpipe_get(rpipe);
465 d_printf(2, dev, "ep 0x%02x: reusing rpipe %u\n",
466 ep->desc.bEndpointAddress,
467 le16_to_cpu(rpipe->descr.wRPipeIndex));
468 } else {
469 /* hmm, assign idle rpipe, aim it */
470 result = -ENOBUFS;
471 eptype = ep->desc.bmAttributes & 0x03;
472 result = rpipe_get_idle(&rpipe, wa, 1 << eptype, gfp);
473 if (result < 0)
474 goto error;
475 result = rpipe_aim(rpipe, wa, ep, urb, gfp);
476 if (result < 0) {
477 rpipe_put(rpipe);
478 goto error;
479 }
480 ep->hcpriv = rpipe;
481 rpipe->ep = ep;
482 __rpipe_get(rpipe); /* for caching into ep->hcpriv */
483 d_printf(2, dev, "ep 0x%02x: using rpipe %u\n",
484 ep->desc.bEndpointAddress,
485 le16_to_cpu(rpipe->descr.wRPipeIndex));
486 }
487 d_dump(4, dev, &rpipe->descr, sizeof(rpipe->descr));
488error:
489 mutex_unlock(&wa->rpipe_mutex);
490 d_fnend(3, dev, "(wa %p ep %p urb %p gfp 0x%08x)\n", wa, ep, urb, gfp);
491 return result;
492}
493
494/*
495 * Allocate the bitmap for each rpipe.
496 */
497int wa_rpipes_create(struct wahc *wa)
498{
499 wa->rpipes = wa->wa_descr->wNumRPipes;
500 wa->rpipe_bm = kzalloc(BITS_TO_LONGS(wa->rpipes)*sizeof(unsigned long),
501 GFP_KERNEL);
502 if (wa->rpipe_bm == NULL)
503 return -ENOMEM;
504 return 0;
505}
506
507void wa_rpipes_destroy(struct wahc *wa)
508{
509 struct device *dev = &wa->usb_iface->dev;
510 d_fnstart(3, dev, "(wa %p)\n", wa);
511 if (!bitmap_empty(wa->rpipe_bm, wa->rpipes)) {
512 char buf[256];
513 WARN_ON(1);
514 bitmap_scnprintf(buf, sizeof(buf), wa->rpipe_bm, wa->rpipes);
515 dev_err(dev, "BUG: pipes not released on exit: %s\n", buf);
516 }
517 kfree(wa->rpipe_bm);
518 d_fnend(3, dev, "(wa %p)\n", wa);
519}
520
/*
 * Release resources allocated for an endpoint
 *
 * If there is an associated rpipe to this endpoint, Abort any pending
 * transfers and put it. If the rpipe ends up being destroyed,
 * __rpipe_destroy() will cleanup ep->hcpriv.
 *
 * This is called before calling hcd->stop(), so you don't need to do
 * anything else in there.
 *
 * Locking: takes wa->rpipe_mutex for the whole operation.
 */
void rpipe_ep_disable(struct wahc *wa, struct usb_host_endpoint *ep)
{
	struct device *dev = &wa->usb_iface->dev;
	struct wa_rpipe *rpipe;
	d_fnstart(2, dev, "(wa %p ep %p)\n", wa, ep);
	mutex_lock(&wa->rpipe_mutex);
	rpipe = ep->hcpriv;
	if (rpipe != NULL) {
		/* by disable time only the ep->hcpriv reference should
		 * remain; log (at debug level) if somebody else still
		 * holds one */
		unsigned rc = atomic_read(&rpipe->refcnt.refcount);
		int result;
		u16 index = le16_to_cpu(rpipe->descr.wRPipeIndex);

		if (rc != 1)
			d_printf(1, dev, "(wa %p ep %p) rpipe %p refcnt %u\n",
				 wa, ep, rpipe, rc);

		d_printf(1, dev, "rpipe %u: abort\n", index);
		/* NOTE(review): uses usb_rcvctrlpipe() with a
		 * USB_DIR_OUT bmRequestType; __rpipe_reset() uses
		 * usb_sndctrlpipe() — confirm the intended direction. */
		result = usb_control_msg(
			wa->usb_dev, usb_rcvctrlpipe(wa->usb_dev, 0),
			USB_REQ_RPIPE_ABORT,
			USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE,
			0, index, NULL, 0, 1000 /* FIXME: arbitrary */);
		if (result < 0 && result != -ENODEV /* dev is gone */)
			d_printf(1, dev, "(wa %p rpipe %u): abort failed: %d\n",
				 wa, index, result);
		rpipe_put(rpipe);
	}
	mutex_unlock(&wa->rpipe_mutex);
	d_fnend(2, dev, "(wa %p ep %p)\n", wa, ep);
	return;
}
EXPORT_SYMBOL_GPL(rpipe_ep_disable);
diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
new file mode 100644
index 000000000000..c038635d1c64
--- /dev/null
+++ b/drivers/usb/wusbcore/wa-xfer.c
@@ -0,0 +1,1709 @@
1/*
2 * WUSB Wire Adapter
 3 * Data transfer and URB enqueuing
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * How transfers work: get a buffer, break it up in segments (segment
24 * size is a multiple of the maxpacket size). For each segment issue a
25 * segment request (struct wa_xfer_*), then send the data buffer if
26 * out or nothing if in (all over the DTO endpoint).
27 *
28 * For each submitted segment request, a notification will come over
29 * the NEP endpoint and a transfer result (struct xfer_result) will
30 * arrive in the DTI URB. Read it, get the xfer ID, see if there is
31 * data coming (inbound transfer), schedule a read and handle it.
32 *
33 * Sounds simple, it is a pain to implement.
34 *
35 *
36 * ENTRY POINTS
37 *
38 * FIXME
39 *
40 * LIFE CYCLE / STATE DIAGRAM
41 *
42 * FIXME
43 *
44 * THIS CODE IS DISGUSTING
45 *
46 * Warned you are; it's my second try and still not happy with it.
47 *
48 * NOTES:
49 *
50 * - No iso
51 *
52 * - Supports DMA xfers, control, bulk and maybe interrupt
53 *
54 * - Does not recycle unused rpipes
55 *
56 * An rpipe is assigned to an endpoint the first time it is used,
57 * and then it's there, assigned, until the endpoint is disabled
58 * (destroyed [{h,d}wahc_op_ep_disable()]. The assignment of the
59 * rpipe to the endpoint is done under the wa->rpipe_sem semaphore
60 * (should be a mutex).
61 *
62 * Two methods it could be done:
63 *
 64 * (a) set up a timer every time an rpipe's use count drops to 1
65 * (which means unused) or when a transfer ends. Reset the
66 * timer when a xfer is queued. If the timer expires, release
67 * the rpipe [see rpipe_ep_disable()].
68 *
69 * (b) when looking for free rpipes to attach [rpipe_get_by_ep()],
70 * when none are found go over the list, check their endpoint
71 * and their activity record (if no last-xfer-done-ts in the
72 * last x seconds) take it
73 *
74 * However, due to the fact that we have a set of limited
75 * resources (max-segments-at-the-same-time per xfer,
76 * xfers-per-ripe, blocks-per-rpipe, rpipes-per-host), at the end
77 * we are going to have to rebuild all this based on an scheduler,
78 * to where we have a list of transactions to do and based on the
 79 * availability of the different required components (blocks,
80 * rpipes, segment slots, etc), we go scheduling them. Painful.
81 */
82#include <linux/init.h>
83#include <linux/spinlock.h>
84#include <linux/hash.h>
85#include "wa-hc.h"
86#include "wusbhc.h"
87
88#undef D_LOCAL
89#define D_LOCAL 0 /* 0 disabled, > 0 different levels... */
90#include <linux/uwb/debug.h>
91
/* Upper bound on the number of segments a single transfer is split into. */
enum {
	WA_SEGS_MAX = 255,
};
95
/*
 * Life-cycle states of a transfer segment (struct wa_seg).
 *
 * Declaration order matters: the completion callbacks compare with
 * `status < WA_SEG_PENDING` to avoid regressing a state.
 */
enum wa_seg_status {
	WA_SEG_NOTREADY,
	WA_SEG_READY,		/* initialized, ready to submit */
	WA_SEG_DELAYED,		/* waiting for rpipe slot availability */
	WA_SEG_SUBMITTED,
	WA_SEG_PENDING,		/* request sent; awaiting transfer result */
	WA_SEG_DTI_PENDING,	/* presumably: inbound data read pending on DTI */
	WA_SEG_DONE,
	WA_SEG_ERROR,		/* failed; seg->result holds the error */
	WA_SEG_ABORTED,		/* cancelled via URB unlink */
};
107
108static void wa_xfer_delayed_run(struct wa_rpipe *);
109
/*
 * One segment of a transfer.
 *
 * Life cycle governed by 'struct urb' (the refcount of the struct is
 * that of the 'struct urb' and usb_free_urb() would free the whole
 * struct).
 *
 * NOTE: 'urb' must remain the first member; the segment is released
 * by putting &seg->urb, so the kfree() of the URB frees the segment.
 */
struct wa_seg {
	struct urb urb;			/* request URB; MUST be first */
	struct urb *dto_urb;		/* OUT data phase URB (NULL for inbound) */
	struct list_head list_node;	/* for rpipe->req_list */
	struct wa_xfer *xfer;		/* transfer we belong to */
	u8 index;			/* which segment we are */
	enum wa_seg_status status;
	ssize_t result;			/* bytes xfered or error */
	struct wa_xfer_hdr xfer_hdr;	/* wire request header */
	u8 xfer_extra[];		/* xtra space for xfer_hdr_ctl */
};
126
/*
 * Minimal init of the segment's embedded URB.
 *
 * The segment is kzalloc()ed by the caller, so the URB is already
 * zeroed; only the refcount needs setting up (usb_init_urb() would
 * redo the zeroing).
 */
static void wa_seg_init(struct wa_seg *seg)
{
	/* usb_init_urb() repeats a lot of work, so we do it here */
	kref_init(&seg->urb.kref);
}
132
/*
 * An in-flight transfer: one URB broken into segments.
 *
 * Protected by xfer->lock (list_node is protected by the owning
 * wahc's xfer_list_lock instead).
 */
struct wa_xfer {
	struct kref refcnt;		/* released via wa_xfer_destroy() */
	struct list_head list_node;	/* on wa->xfer_list */
	spinlock_t lock;
	u32 id;				/* sequential ID handed to the HWA */

	struct wahc *wa;		/* Wire adapter we are plugged to */
	struct usb_host_endpoint *ep;
	struct urb *urb;		/* URB we are transferring for */
	struct wa_seg **seg;		/* transfer segments */
	u8 segs, segs_submitted, segs_done;
	unsigned is_inbound:1;
	unsigned is_dma:1;
	size_t seg_size;		/* bytes per segment (last may be short) */
	int result;

	gfp_t gfp;			/* allocation mask */

	struct wusb_dev *wusb_dev;	/* for activity timestamps */
};
157
/* Initialize refcount, list linkage and lock of a fresh transfer. */
static inline void wa_xfer_init(struct wa_xfer *xfer)
{
	kref_init(&xfer->refcnt);
	INIT_LIST_HEAD(&xfer->list_node);
	spin_lock_init(&xfer->lock);
}
164
165/*
166 * Destory a transfer structure
167 *
168 * Note that the xfer->seg[index] thingies follow the URB life cycle,
169 * so we need to put them, not free them.
170 */
171static void wa_xfer_destroy(struct kref *_xfer)
172{
173 struct wa_xfer *xfer = container_of(_xfer, struct wa_xfer, refcnt);
174 if (xfer->seg) {
175 unsigned cnt;
176 for (cnt = 0; cnt < xfer->segs; cnt++) {
177 if (xfer->is_inbound)
178 usb_put_urb(xfer->seg[cnt]->dto_urb);
179 usb_put_urb(&xfer->seg[cnt]->urb);
180 }
181 }
182 kfree(xfer);
183 d_printf(2, NULL, "xfer %p destroyed\n", xfer);
184}
185
/* Take a reference on the transfer. */
static void wa_xfer_get(struct wa_xfer *xfer)
{
	kref_get(&xfer->refcnt);
}
190
/* Drop a reference on the transfer; the last put destroys it. */
static void wa_xfer_put(struct wa_xfer *xfer)
{
	d_fnstart(3, NULL, "(xfer %p) -- ref count bef put %d\n",
		  xfer, atomic_read(&xfer->refcnt.refcount));
	kref_put(&xfer->refcnt, wa_xfer_destroy);
	d_fnend(3, NULL, "(xfer %p) = void\n", xfer);
}
198
/*
 * Remove the transfer from the adapter's list and give the URB back
 * to the USB stack.
 *
 * xfer is referenced
 *
 * xfer->lock has to be unlocked
 *
 * We take xfer->lock for setting the result; this is a barrier
 * against drivers/usb/core/hcd.c:unlink1() being called after we call
 * usb_hcd_giveback_urb() and wa_urb_dequeue() trying to get a
 * reference to the transfer.
 */
static void wa_xfer_giveback(struct wa_xfer *xfer)
{
	unsigned long flags;
	d_fnstart(3, NULL, "(xfer %p)\n", xfer);
	spin_lock_irqsave(&xfer->wa->xfer_list_lock, flags);
	list_del_init(&xfer->list_node);
	spin_unlock_irqrestore(&xfer->wa->xfer_list_lock, flags);
	/* FIXME: segmentation broken -- kills DWA */
	wusbhc_giveback_urb(xfer->wa->wusb, xfer->urb, xfer->result);
	/* presumably pairs with a wa_get() taken at enqueue time --
	 * confirm against the enqueue path */
	wa_put(xfer->wa);
	wa_xfer_put(xfer);
	d_fnend(3, NULL, "(xfer %p) = void\n", xfer);
}
222
/*
 * Wrap up a completed transfer: drop the wusb device and rpipe
 * references it held, then give the URB back.
 *
 * xfer is referenced
 *
 * xfer->lock has to be unlocked
 */
static void wa_xfer_completion(struct wa_xfer *xfer)
{
	d_fnstart(3, NULL, "(xfer %p)\n", xfer);
	if (xfer->wusb_dev)
		wusb_dev_put(xfer->wusb_dev);
	rpipe_put(xfer->ep->hcpriv);
	wa_xfer_giveback(xfer);
	d_fnend(3, NULL, "(xfer %p) = void\n", xfer);
	return;
}
238
/*
 * If transfer is done, wrap it up and return true
 *
 * xfer->lock has to be locked
 *
 * "Done" means every submitted segment has finished (done, error or
 * aborted). On the way out, accumulate urb->actual_length from the
 * per-segment results and set xfer->result.
 */
static unsigned __wa_xfer_is_done(struct wa_xfer *xfer)
{
	unsigned result, cnt;
	struct wa_seg *seg;
	struct urb *urb = xfer->urb;
	unsigned found_short = 0;

	d_fnstart(3, NULL, "(xfer %p)\n", xfer);
	result = xfer->segs_done == xfer->segs_submitted;
	if (result == 0)
		goto out;
	urb->actual_length = 0;
	for (cnt = 0; cnt < xfer->segs; cnt++) {
		seg = xfer->seg[cnt];
		switch (seg->status) {
		case WA_SEG_DONE:
			/* a short segment is only legal as the last one */
			if (found_short && seg->result > 0) {
				if (printk_ratelimit())
					printk(KERN_ERR "xfer %p#%u: bad short "
					       "segments (%zu)\n", xfer, cnt,
					       seg->result);
				/* NOTE(review): this path sets urb->status
				 * but not xfer->result, unlike the other
				 * error paths below -- confirm intended. */
				urb->status = -EINVAL;
				goto out;
			}
			urb->actual_length += seg->result;
			if (seg->result < xfer->seg_size
			    && cnt != xfer->segs-1)
				found_short = 1;
			d_printf(2, NULL, "xfer %p#%u: DONE short %d "
				 "result %zu urb->actual_length %d\n",
				 xfer, seg->index, found_short, seg->result,
				 urb->actual_length);
			break;
		case WA_SEG_ERROR:
			xfer->result = seg->result;
			d_printf(2, NULL, "xfer %p#%u: ERROR result %zu\n",
				 xfer, seg->index, seg->result);
			goto out;
		case WA_SEG_ABORTED:
			/* aborts only come from URB unlinks */
			WARN_ON(urb->status != -ECONNRESET
				&& urb->status != -ENOENT);
			d_printf(2, NULL, "xfer %p#%u ABORTED: result %d\n",
				 xfer, seg->index, urb->status);
			xfer->result = urb->status;
			goto out;
		default:
			/* if (printk_ratelimit()) */
			printk(KERN_ERR "xfer %p#%u: "
			       "is_done bad state %d\n",
			       xfer, cnt, seg->status);
			xfer->result = -EINVAL;
			WARN_ON(1);
			goto out;
		}
	}
	xfer->result = 0;
out:
	d_fnend(3, NULL, "(xfer %p) = void\n", xfer);
	return result;
}
304
/*
 * Initialize a transfer's ID
 *
 * We need to use a sequential number; if we use the pointer or the
 * hash of the pointer, it can repeat over sequential transfers and
 * then it will confuse the HWA....wonder why in hell they put a 32
 * bit handle in there then.
 */
static void wa_xfer_id_init(struct wa_xfer *xfer)
{
	/* the counter lives in the wire adapter, shared by all xfers */
	xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
}
317
/*
 * Return the ID assigned to this transfer by wa_xfer_id_init().
 */
static u32 wa_xfer_id(struct wa_xfer *xfer)
{
	return xfer->id;
}
327
/*
 * Search for a transfer by ID on the wire adapter's transfer list
 *
 * Takes a reference on the transfer when found; the caller must put
 * it. Holds wa->xfer_list_lock during the walk.
 *
 * @returns NULL if not found.
 */
static struct wa_xfer *wa_xfer_get_by_id(struct wahc *wa, u32 id)
{
	unsigned long flags;
	struct wa_xfer *xfer_itr;
	spin_lock_irqsave(&wa->xfer_list_lock, flags);
	list_for_each_entry(xfer_itr, &wa->xfer_list, list_node) {
		if (id == xfer_itr->id) {
			wa_xfer_get(xfer_itr);
			goto out;
		}
	}
	xfer_itr = NULL;
out:
	spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
	return xfer_itr;
}
352
/*
 * Holder for an in-flight transfer-abort request. The URB must be
 * the first member so that putting it (kfree on the URB pointer)
 * frees the whole buffer -- see __wa_xfer_abort().
 */
struct wa_xfer_abort_buffer {
	struct urb urb;			/* MUST be first */
	struct wa_xfer_abort cmd;	/* abort request sent over DTO */
};
357
/* Completion callback for the abort request URB: drop the holder's
 * reference, which frees the whole wa_xfer_abort_buffer. */
static void __wa_xfer_abort_cb(struct urb *urb)
{
	struct wa_xfer_abort_buffer *b = urb->context;
	usb_put_urb(&b->urb);
}
363
/*
 * Aborts an ongoing transaction
 *
 * Assumes the transfer is referenced and locked and in a submitted
 * state (mainly that there is an endpoint/rpipe assigned).
 *
 * The callback (see above) does nothing but freeing up the data by
 * putting the URB. Because the URB is allocated at the head of the
 * struct, the whole space we allocated is kfreed.
 *
 * We'll get an 'aborted transaction' xfer result on DTI, that we'll
 * politely ignore because at this point the transaction has been
 * marked as aborted already.
 *
 * Failures (allocation or submit) are logged and otherwise swallowed:
 * there is nothing more we can do here.
 */
static void __wa_xfer_abort(struct wa_xfer *xfer)
{
	int result;
	struct device *dev = &xfer->wa->usb_iface->dev;
	struct wa_xfer_abort_buffer *b;
	struct wa_rpipe *rpipe = xfer->ep->hcpriv;

	b = kmalloc(sizeof(*b), GFP_ATOMIC);
	if (b == NULL)
		goto error_kmalloc;
	b->cmd.bLength = sizeof(b->cmd);
	b->cmd.bRequestType = WA_XFER_ABORT;
	b->cmd.wRPipe = rpipe->descr.wRPipeIndex;	/* both wire order */
	b->cmd.dwTransferID = wa_xfer_id(xfer);

	usb_init_urb(&b->urb);
	usb_fill_bulk_urb(&b->urb, xfer->wa->usb_dev,
		usb_sndbulkpipe(xfer->wa->usb_dev,
				xfer->wa->dto_epd->bEndpointAddress),
		&b->cmd, sizeof(b->cmd), __wa_xfer_abort_cb, b);
	result = usb_submit_urb(&b->urb, GFP_ATOMIC);
	if (result < 0)
		goto error_submit;
	return;				/* callback frees! */


error_submit:
	if (printk_ratelimit())
		dev_err(dev, "xfer %p: Can't submit abort request: %d\n",
			xfer, result);
	kfree(b);
error_kmalloc:
	return;

}
413
/*
 * Compute the transfer type and sizing for a transfer.
 *
 * Sets *pxfer_type from the rpipe's endpoint type and fills in
 * xfer->is_inbound, is_dma, seg_size and segs from the URB and the
 * rpipe descriptor.
 *
 * @returns < 0 on error, transfer segment request size if ok
 */
static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer,
				     enum wa_xfer_type *pxfer_type)
{
	ssize_t result;
	struct device *dev = &xfer->wa->usb_iface->dev;
	size_t maxpktsize;
	struct urb *urb = xfer->urb;
	struct wa_rpipe *rpipe = xfer->ep->hcpriv;

	d_fnstart(3, dev, "(xfer %p [rpipe %p] urb %p)\n",
		  xfer, rpipe, urb);
	switch (rpipe->descr.bmAttribute & 0x3) {
	case USB_ENDPOINT_XFER_CONTROL:
		*pxfer_type = WA_XFER_TYPE_CTL;
		result = sizeof(struct wa_xfer_ctl);
		break;
	case USB_ENDPOINT_XFER_INT:
	case USB_ENDPOINT_XFER_BULK:
		*pxfer_type = WA_XFER_TYPE_BI;
		result = sizeof(struct wa_xfer_bi);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		dev_err(dev, "FIXME: ISOC not implemented\n");
		result = -ENOSYS;
		goto error;
	default:
		/* never happens */
		BUG();
		result = -EINVAL;	/* shut gcc up */
	};
	xfer->is_inbound = urb->pipe & USB_DIR_IN ? 1 : 0;
	xfer->is_dma = urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? 1 : 0;
	/* seg_size = wBlocks blocks of 2^(bRPipeBlockSize-1) bytes each;
	 * note `a * 1 << b` parses as `(a * 1) << b`, which is the
	 * intended multiplication */
	xfer->seg_size = le16_to_cpu(rpipe->descr.wBlocks)
		* 1 << (xfer->wa->wa_descr->bRPipeBlockSize - 1);
	/* Compute the segment size and make sure it is a multiple of
	 * the maxpktsize (WUSB1.0[8.3.3.1])...not really too much of
	 * a check (FIXME) */
	maxpktsize = le16_to_cpu(rpipe->descr.wMaxPacketSize);
	if (xfer->seg_size < maxpktsize) {
		dev_err(dev, "HW BUG? seg_size %zu smaller than maxpktsize "
			"%zu\n", xfer->seg_size, maxpktsize);
		result = -EINVAL;
		goto error;
	}
	/* round seg_size down to a multiple of maxpktsize */
	xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize;
	xfer->segs = (urb->transfer_buffer_length + xfer->seg_size - 1)
		/ xfer->seg_size;
	if (xfer->segs >= WA_SEGS_MAX) {
		dev_err(dev, "BUG? ops, number of segments %d bigger than %d\n",
			(int)(urb->transfer_buffer_length / xfer->seg_size),
			WA_SEGS_MAX);
		result = -EINVAL;
		goto error;
	}
	/* zero-length control transfers still need one segment for the
	 * setup packet */
	if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL)
		xfer->segs = 1;
error:
	d_fnend(3, dev, "(xfer %p [rpipe %p] urb %p) = %d\n",
		xfer, rpipe, urb, (int)result);
	return result;
}
479
/*
 * Fill in the common request header and xfer-type specific data.
 *
 * NOTE(review): the xfer_hdr0 parameter is immediately overwritten
 * with &xfer->seg[0]->xfer_hdr; the caller passes that same pointer,
 * so this is harmless, but the parameter is effectively unused --
 * confirm before relying on it.
 *
 * NOTE(review): dwTransferID is assigned in host byte order while the
 * wire format is little-endian; harmless on LE, verify on BE.
 */
static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,
				 struct wa_xfer_hdr *xfer_hdr0,
				 enum wa_xfer_type xfer_type,
				 size_t xfer_hdr_size)
{
	struct wa_rpipe *rpipe = xfer->ep->hcpriv;

	xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
	xfer_hdr0->bLength = xfer_hdr_size;
	xfer_hdr0->bRequestType = xfer_type;
	xfer_hdr0->wRPipe = rpipe->descr.wRPipeIndex;
	xfer_hdr0->dwTransferID = wa_xfer_id(xfer);
	xfer_hdr0->bTransferSegment = 0;
	switch (xfer_type) {
	case WA_XFER_TYPE_CTL: {
		struct wa_xfer_ctl *xfer_ctl =
			container_of(xfer_hdr0, struct wa_xfer_ctl, hdr);
		xfer_ctl->bmAttribute = xfer->is_inbound ? 1 : 0;
		/* a NULL setup packet is only legal when it is DMA-mapped */
		BUG_ON(xfer->urb->transfer_flags & URB_NO_SETUP_DMA_MAP
		       && xfer->urb->setup_packet == NULL);
		memcpy(&xfer_ctl->baSetupData, xfer->urb->setup_packet,
		       sizeof(xfer_ctl->baSetupData));
		break;
	}
	case WA_XFER_TYPE_BI:
		break;
	case WA_XFER_TYPE_ISO:
		printk(KERN_ERR "FIXME: ISOC not implemented\n");
		/* fallthrough to BUG(): isoc is not supported yet */
	default:
		BUG();
	};
}
513
/*
 * Callback for the OUT data phase of the segment request
 *
 * Check wa_seg_cb(); most comments also apply here because this
 * function does almost the same thing and they work closely
 * together.
 *
 * If the seg request has failed but this DTO phase has succeeded,
 * wa_seg_cb() has already failed the segment and moved the
 * status to WA_SEG_ERROR, so this will go through 'case 0' and
 * effectively do nothing.
 */
static void wa_seg_dto_cb(struct urb *urb)
{
	struct wa_seg *seg = urb->context;
	struct wa_xfer *xfer = seg->xfer;
	struct wahc *wa;
	struct device *dev;
	struct wa_rpipe *rpipe;
	unsigned long flags;
	unsigned rpipe_ready = 0;
	u8 done = 0;

	d_fnstart(3, NULL, "(urb %p [%d])\n", urb, urb->status);
	switch (urb->status) {
	case 0:
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		d_printf(2, dev, "xfer %p#%u: data out done (%d bytes)\n",
			 xfer, seg->index, urb->actual_length);
		/* don't regress the state if the xfer result already
		 * moved it past PENDING */
		if (seg->status < WA_SEG_PENDING)
			seg->status = WA_SEG_PENDING;
		seg->result = urb->actual_length;
		spin_unlock_irqrestore(&xfer->lock, flags);
		break;
	case -ECONNRESET:	/* URB unlinked; no need to do anything */
	case -ENOENT:		/* as it was done by the who unlinked us */
		break;
	default:		/* Other errors ... */
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		rpipe = xfer->ep->hcpriv;
		if (printk_ratelimit())
			dev_err(dev, "xfer %p#%u: data out error %d\n",
				xfer, seg->index, urb->status);
		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)){
			dev_err(dev, "DTO: URB max acceptable errors "
				"exceeded, resetting device\n");
			wa_reset_all(wa);
		}
		/* only fail the segment once (wa_seg_cb() may already
		 * have done it) */
		if (seg->status != WA_SEG_ERROR) {
			seg->status = WA_SEG_ERROR;
			seg->result = urb->status;
			xfer->segs_done++;
			__wa_xfer_abort(xfer);
			rpipe_ready = rpipe_avail_inc(rpipe);
			done = __wa_xfer_is_done(xfer);
		}
		spin_unlock_irqrestore(&xfer->lock, flags);
		/* completion and delayed-run happen outside xfer->lock */
		if (done)
			wa_xfer_completion(xfer);
		if (rpipe_ready)
			wa_xfer_delayed_run(rpipe);
	}
	d_fnend(3, NULL, "(urb %p [%d]) = void\n", urb, urb->status);
}
583
/*
 * Callback for the segment request
 *
 * If successful transition state (unless already transitioned or
 * outbound transfer); otherwise, take a note of the error, mark this
 * segment done and try completion.
 *
 * Note we don't access until we are sure that the transfer hasn't
 * been cancelled (ECONNRESET, ENOENT), which could mean that
 * seg->xfer could be already gone.
 *
 * We have to check before setting the status to WA_SEG_PENDING
 * because sometimes the xfer result callback arrives before this
 * callback (geeeeeeze), so it might happen that we are already in
 * another state. As well, we don't set it if the transfer is inbound,
 * as in that case, wa_seg_dto_cb will do it when the OUT data phase
 * finishes.
 */
static void wa_seg_cb(struct urb *urb)
{
	struct wa_seg *seg = urb->context;
	struct wa_xfer *xfer = seg->xfer;
	struct wahc *wa;
	struct device *dev;
	struct wa_rpipe *rpipe;
	unsigned long flags;
	unsigned rpipe_ready;
	u8 done = 0;

	d_fnstart(3, NULL, "(urb %p [%d])\n", urb, urb->status);
	switch (urb->status) {
	case 0:
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		d_printf(2, dev, "xfer %p#%u: request done\n",
			 xfer, seg->index);
		if (xfer->is_inbound && seg->status < WA_SEG_PENDING)
			seg->status = WA_SEG_PENDING;
		spin_unlock_irqrestore(&xfer->lock, flags);
		break;
	case -ECONNRESET:	/* URB unlinked; no need to do anything */
	case -ENOENT:		/* as it was done by the who unlinked us */
		break;
	default:		/* Other errors ... */
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		rpipe = xfer->ep->hcpriv;
		if (printk_ratelimit())
			dev_err(dev, "xfer %p#%u: request error %d\n",
				xfer, seg->index, urb->status);
		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)){
			dev_err(dev, "DTO: URB max acceptable errors "
				"exceeded, resetting device\n");
			wa_reset_all(wa);
		}
		/* the OUT data phase can't succeed on its own any more */
		usb_unlink_urb(seg->dto_urb);
		seg->status = WA_SEG_ERROR;
		seg->result = urb->status;
		xfer->segs_done++;
		__wa_xfer_abort(xfer);
		rpipe_ready = rpipe_avail_inc(rpipe);
		done = __wa_xfer_is_done(xfer);
		spin_unlock_irqrestore(&xfer->lock, flags);
		/* completion and delayed-run happen outside xfer->lock */
		if (done)
			wa_xfer_completion(xfer);
		if (rpipe_ready)
			wa_xfer_delayed_run(rpipe);
	}
	d_fnend(3, NULL, "(urb %p [%d]) = void\n", urb, urb->status);
}
657
658/*
659 * Allocate the segs array and initialize each of them
660 *
661 * The segments are freed by wa_xfer_destroy() when the xfer use count
662 * drops to zero; however, because each segment is given the same life
663 * cycle as the USB URB it contains, it is actually freed by
664 * usb_put_urb() on the contained USB URB (twisted, eh?).
665 */
666static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
667{
668 int result, cnt;
669 size_t alloc_size = sizeof(*xfer->seg[0])
670 - sizeof(xfer->seg[0]->xfer_hdr) + xfer_hdr_size;
671 struct usb_device *usb_dev = xfer->wa->usb_dev;
672 const struct usb_endpoint_descriptor *dto_epd = xfer->wa->dto_epd;
673 struct wa_seg *seg;
674 size_t buf_itr, buf_size, buf_itr_size;
675
676 result = -ENOMEM;
677 xfer->seg = kcalloc(xfer->segs, sizeof(xfer->seg[0]), GFP_ATOMIC);
678 if (xfer->seg == NULL)
679 goto error_segs_kzalloc;
680 buf_itr = 0;
681 buf_size = xfer->urb->transfer_buffer_length;
682 for (cnt = 0; cnt < xfer->segs; cnt++) {
683 seg = xfer->seg[cnt] = kzalloc(alloc_size, GFP_ATOMIC);
684 if (seg == NULL)
685 goto error_seg_kzalloc;
686 wa_seg_init(seg);
687 seg->xfer = xfer;
688 seg->index = cnt;
689 usb_fill_bulk_urb(&seg->urb, usb_dev,
690 usb_sndbulkpipe(usb_dev,
691 dto_epd->bEndpointAddress),
692 &seg->xfer_hdr, xfer_hdr_size,
693 wa_seg_cb, seg);
694 buf_itr_size = buf_size > xfer->seg_size ?
695 xfer->seg_size : buf_size;
696 if (xfer->is_inbound == 0 && buf_size > 0) {
697 seg->dto_urb = usb_alloc_urb(0, GFP_ATOMIC);
698 if (seg->dto_urb == NULL)
699 goto error_dto_alloc;
700 usb_fill_bulk_urb(
701 seg->dto_urb, usb_dev,
702 usb_sndbulkpipe(usb_dev,
703 dto_epd->bEndpointAddress),
704 NULL, 0, wa_seg_dto_cb, seg);
705 if (xfer->is_dma) {
706 seg->dto_urb->transfer_dma =
707 xfer->urb->transfer_dma + buf_itr;
708 seg->dto_urb->transfer_flags |=
709 URB_NO_TRANSFER_DMA_MAP;
710 } else
711 seg->dto_urb->transfer_buffer =
712 xfer->urb->transfer_buffer + buf_itr;
713 seg->dto_urb->transfer_buffer_length = buf_itr_size;
714 }
715 seg->status = WA_SEG_READY;
716 buf_itr += buf_itr_size;
717 buf_size -= buf_itr_size;
718 }
719 return 0;
720
721error_dto_alloc:
722 kfree(xfer->seg[cnt]);
723 cnt--;
724error_seg_kzalloc:
725 /* use the fact that cnt is left at were it failed */
726 for (; cnt > 0; cnt--) {
727 if (xfer->is_inbound == 0)
728 kfree(xfer->seg[cnt]->dto_urb);
729 kfree(xfer->seg[cnt]);
730 }
731error_segs_kzalloc:
732 return result;
733}
734
735/*
736 * Allocates all the stuff needed to submit a transfer
737 *
738 * Breaks the whole data buffer in a list of segments, each one has a
739 * structure allocated to it and linked in xfer->seg[index]
740 *
741 * FIXME: merge setup_segs() and the last part of this function, no
742 * need to do two for loops when we could run everything in a
743 * single one
744 */
745static int __wa_xfer_setup(struct wa_xfer *xfer, struct urb *urb)
746{
747 int result;
748 struct device *dev = &xfer->wa->usb_iface->dev;
749 enum wa_xfer_type xfer_type = 0; /* shut up GCC */
750 size_t xfer_hdr_size, cnt, transfer_size;
751 struct wa_xfer_hdr *xfer_hdr0, *xfer_hdr;
752
753 d_fnstart(3, dev, "(xfer %p [rpipe %p] urb %p)\n",
754 xfer, xfer->ep->hcpriv, urb);
755
756 result = __wa_xfer_setup_sizes(xfer, &xfer_type);
757 if (result < 0)
758 goto error_setup_sizes;
759 xfer_hdr_size = result;
760 result = __wa_xfer_setup_segs(xfer, xfer_hdr_size);
761 if (result < 0) {
762 dev_err(dev, "xfer %p: Failed to allocate %d segments: %d\n",
763 xfer, xfer->segs, result);
764 goto error_setup_segs;
765 }
766 /* Fill the first header */
767 xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
768 wa_xfer_id_init(xfer);
769 __wa_xfer_setup_hdr0(xfer, xfer_hdr0, xfer_type, xfer_hdr_size);
770
771 /* Fill remainig headers */
772 xfer_hdr = xfer_hdr0;
773 transfer_size = urb->transfer_buffer_length;
774 xfer_hdr0->dwTransferLength = transfer_size > xfer->seg_size ?
775 xfer->seg_size : transfer_size;
776 transfer_size -= xfer->seg_size;
777 for (cnt = 1; cnt < xfer->segs; cnt++) {
778 xfer_hdr = &xfer->seg[cnt]->xfer_hdr;
779 memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
780 xfer_hdr->bTransferSegment = cnt;
781 xfer_hdr->dwTransferLength = transfer_size > xfer->seg_size ?
782 cpu_to_le32(xfer->seg_size)
783 : cpu_to_le32(transfer_size);
784 xfer->seg[cnt]->status = WA_SEG_READY;
785 transfer_size -= xfer->seg_size;
786 }
787 xfer_hdr->bTransferSegment |= 0x80; /* this is the last segment */
788 result = 0;
789error_setup_segs:
790error_setup_sizes:
791 d_fnend(3, dev, "(xfer %p [rpipe %p] urb %p) = %d\n",
792 xfer, xfer->ep->hcpriv, urb, result);
793 return result;
794}
795
796/*
797 *
798 *
799 * rpipe->seg_lock is held!
800 */
801static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer,
802 struct wa_seg *seg)
803{
804 int result;
805 result = usb_submit_urb(&seg->urb, GFP_ATOMIC);
806 if (result < 0) {
807 printk(KERN_ERR "xfer %p#%u: REQ submit failed: %d\n",
808 xfer, seg->index, result);
809 goto error_seg_submit;
810 }
811 if (seg->dto_urb) {
812 result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
813 if (result < 0) {
814 printk(KERN_ERR "xfer %p#%u: DTO submit failed: %d\n",
815 xfer, seg->index, result);
816 goto error_dto_submit;
817 }
818 }
819 seg->status = WA_SEG_SUBMITTED;
820 rpipe_avail_dec(rpipe);
821 return 0;
822
823error_dto_submit:
824 usb_unlink_urb(&seg->urb);
825error_seg_submit:
826 seg->status = WA_SEG_ERROR;
827 seg->result = result;
828 return result;
829}
830
/*
 * Execute more queued request segments until the maximum concurrent allowed
 *
 * The ugly unlock/lock sequence on the error path is needed as the
 * xfer->lock normally nests the seg_lock and not viceversa.
 *
 */
static void wa_xfer_delayed_run(struct wa_rpipe *rpipe)
{
	int result;
	struct device *dev = &rpipe->wa->usb_iface->dev;
	struct wa_seg *seg;
	struct wa_xfer *xfer;
	unsigned long flags;

	d_fnstart(1, dev, "(rpipe #%d) %d segments available\n",
		  le16_to_cpu(rpipe->descr.wRPipeIndex),
		  atomic_read(&rpipe->segs_available));
	spin_lock_irqsave(&rpipe->seg_lock, flags);
	/* Drain the delayed-segment list while the rpipe still has
	 * request slots free. */
	while (atomic_read(&rpipe->segs_available) > 0
	      && !list_empty(&rpipe->seg_list)) {
		seg = list_entry(rpipe->seg_list.next, struct wa_seg,
				 list_node);
		list_del(&seg->list_node);
		xfer = seg->xfer;
		result = __wa_seg_submit(rpipe, xfer, seg);
		d_printf(1, dev, "xfer %p#%u submitted from delayed "
			 "[%d segments available] %d\n",
			 xfer, seg->index,
			 atomic_read(&rpipe->segs_available), result);
		if (unlikely(result < 0)) {
			/* Submit failed: we must take xfer->lock, which
			 * nests OUTSIDE seg_lock, so drop seg_lock first
			 * to preserve lock order (see header comment). */
			spin_unlock_irqrestore(&rpipe->seg_lock, flags);
			spin_lock_irqsave(&xfer->lock, flags);
			__wa_xfer_abort(xfer);
			xfer->segs_done++;
			spin_unlock_irqrestore(&xfer->lock, flags);
			spin_lock_irqsave(&rpipe->seg_lock, flags);
		}
	}
	spin_unlock_irqrestore(&rpipe->seg_lock, flags);
	d_fnend(1, dev, "(rpipe #%d) = void, %d segments available\n",
		le16_to_cpu(rpipe->descr.wRPipeIndex),
		atomic_read(&rpipe->segs_available));

}
876
/*
 * Submit all the segments of @xfer; those that don't fit in the
 * rpipe's free request slots are queued as WA_SEG_DELAYED for
 * wa_xfer_delayed_run() to pick up later.
 *
 * xfer->lock is taken
 *
 * On failure submitting we just stop submitting and return error;
 * wa_urb_enqueue_b() will execute the completion path
 */
static int __wa_xfer_submit(struct wa_xfer *xfer)
{
	int result;
	struct wahc *wa = xfer->wa;
	struct device *dev = &wa->usb_iface->dev;
	unsigned cnt;
	struct wa_seg *seg;
	unsigned long flags;
	struct wa_rpipe *rpipe = xfer->ep->hcpriv;
	size_t maxrequests = le16_to_cpu(rpipe->descr.wRequests);
	u8 available;
	u8 empty;

	d_fnstart(3, dev, "(xfer %p [rpipe %p])\n",
		  xfer, xfer->ep->hcpriv);

	/* Make the xfer visible on the adapter's active list first. */
	spin_lock_irqsave(&wa->xfer_list_lock, flags);
	list_add_tail(&xfer->list_node, &wa->xfer_list);
	spin_unlock_irqrestore(&wa->xfer_list_lock, flags);

	BUG_ON(atomic_read(&rpipe->segs_available) > maxrequests);
	result = 0;
	spin_lock_irqsave(&rpipe->seg_lock, flags);
	for (cnt = 0; cnt < xfer->segs; cnt++) {
		available = atomic_read(&rpipe->segs_available);
		empty = list_empty(&rpipe->seg_list);
		seg = xfer->seg[cnt];
		d_printf(2, dev, "xfer %p#%u: available %u empty %u (%s)\n",
			 xfer, cnt, available, empty,
			 available == 0 || !empty ? "delayed" : "submitted");
		/* Delay when no slot is free OR older segments already
		 * wait on the list (keeps submission order FIFO). */
		if (available == 0 || !empty) {
			d_printf(1, dev, "xfer %p#%u: delayed\n", xfer, cnt);
			seg->status = WA_SEG_DELAYED;
			list_add_tail(&seg->list_node, &rpipe->seg_list);
		} else {
			result = __wa_seg_submit(rpipe, xfer, seg);
			if (result < 0)
				goto error_seg_submit;
		}
		/* counted for both submitted and delayed segments */
		xfer->segs_submitted++;
	}
	spin_unlock_irqrestore(&rpipe->seg_lock, flags);
	d_fnend(3, dev, "(xfer %p [rpipe %p]) = void\n", xfer,
		xfer->ep->hcpriv);
	return result;

error_seg_submit:
	__wa_xfer_abort(xfer);
	spin_unlock_irqrestore(&rpipe->seg_lock, flags);
	d_fnend(3, dev, "(xfer %p [rpipe %p]) = void\n", xfer,
		xfer->ep->hcpriv);
	return result;
}
937
938/*
939 * Second part of a URB/transfer enqueuement
940 *
941 * Assumes this comes from wa_urb_enqueue() [maybe through
942 * wa_urb_enqueue_run()]. At this point:
943 *
944 * xfer->wa filled and refcounted
945 * xfer->ep filled with rpipe refcounted if
946 * delayed == 0
947 * xfer->urb filled and refcounted (this is the case when called
948 * from wa_urb_enqueue() as we come from usb_submit_urb()
949 * and when called by wa_urb_enqueue_run(), as we took an
950 * extra ref dropped by _run() after we return).
951 * xfer->gfp filled
952 *
953 * If we fail at __wa_xfer_submit(), then we just check if we are done
954 * and if so, we run the completion procedure. However, if we are not
955 * yet done, we do nothing and wait for the completion handlers from
956 * the submitted URBs or from the xfer-result path to kick in. If xfer
957 * result never kicks in, the xfer will timeout from the USB code and
958 * dequeue() will be called.
959 */
960static void wa_urb_enqueue_b(struct wa_xfer *xfer)
961{
962 int result;
963 unsigned long flags;
964 struct urb *urb = xfer->urb;
965 struct wahc *wa = xfer->wa;
966 struct wusbhc *wusbhc = wa->wusb;
967 struct device *dev = &wa->usb_iface->dev;
968 struct wusb_dev *wusb_dev;
969 unsigned done;
970
971 d_fnstart(3, dev, "(wa %p urb %p)\n", wa, urb);
972 result = rpipe_get_by_ep(wa, xfer->ep, urb, xfer->gfp);
973 if (result < 0)
974 goto error_rpipe_get;
975 result = -ENODEV;
976 /* FIXME: segmentation broken -- kills DWA */
977 mutex_lock(&wusbhc->mutex); /* get a WUSB dev */
978 if (urb->dev == NULL)
979 goto error_dev_gone;
980 wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev);
981 if (wusb_dev == NULL) {
982 mutex_unlock(&wusbhc->mutex);
983 goto error_dev_gone;
984 }
985 mutex_unlock(&wusbhc->mutex);
986
987 spin_lock_irqsave(&xfer->lock, flags);
988 xfer->wusb_dev = wusb_dev;
989 result = urb->status;
990 if (urb->status != -EINPROGRESS)
991 goto error_dequeued;
992
993 result = __wa_xfer_setup(xfer, urb);
994 if (result < 0)
995 goto error_xfer_setup;
996 result = __wa_xfer_submit(xfer);
997 if (result < 0)
998 goto error_xfer_submit;
999 spin_unlock_irqrestore(&xfer->lock, flags);
1000 d_fnend(3, dev, "(wa %p urb %p) = void\n", wa, urb);
1001 return;
1002
1003 /* this is basically wa_xfer_completion() broken up wa_xfer_giveback()
1004 * does a wa_xfer_put() that will call wa_xfer_destroy() and clean
1005 * upundo setup().
1006 */
1007error_xfer_setup:
1008error_dequeued:
1009 spin_unlock_irqrestore(&xfer->lock, flags);
1010 /* FIXME: segmentation broken, kills DWA */
1011 if (wusb_dev)
1012 wusb_dev_put(wusb_dev);
1013error_dev_gone:
1014 rpipe_put(xfer->ep->hcpriv);
1015error_rpipe_get:
1016 xfer->result = result;
1017 wa_xfer_giveback(xfer);
1018 d_fnend(3, dev, "(wa %p urb %p) = (void) %d\n", wa, urb, result);
1019 return;
1020
1021error_xfer_submit:
1022 done = __wa_xfer_is_done(xfer);
1023 xfer->result = result;
1024 spin_unlock_irqrestore(&xfer->lock, flags);
1025 if (done)
1026 wa_xfer_completion(xfer);
1027 d_fnend(3, dev, "(wa %p urb %p) = (void) %d\n", wa, urb, result);
1028 return;
1029}
1030
1031/*
1032 * Execute the delayed transfers in the Wire Adapter @wa
1033 *
1034 * We need to be careful here, as dequeue() could be called in the
1035 * middle. That's why we do the whole thing under the
1036 * wa->xfer_list_lock. If dequeue() jumps in, it first locks urb->lock
1037 * and then checks the list -- so as we would be acquiring in inverse
1038 * order, we just drop the lock once we have the xfer and reacquire it
1039 * later.
1040 */
1041void wa_urb_enqueue_run(struct work_struct *ws)
1042{
1043 struct wahc *wa = container_of(ws, struct wahc, xfer_work);
1044 struct device *dev = &wa->usb_iface->dev;
1045 struct wa_xfer *xfer, *next;
1046 struct urb *urb;
1047
1048 d_fnstart(3, dev, "(wa %p)\n", wa);
1049 spin_lock_irq(&wa->xfer_list_lock);
1050 list_for_each_entry_safe(xfer, next, &wa->xfer_delayed_list,
1051 list_node) {
1052 list_del_init(&xfer->list_node);
1053 spin_unlock_irq(&wa->xfer_list_lock);
1054
1055 urb = xfer->urb;
1056 wa_urb_enqueue_b(xfer);
1057 usb_put_urb(urb); /* taken when queuing */
1058
1059 spin_lock_irq(&wa->xfer_list_lock);
1060 }
1061 spin_unlock_irq(&wa->xfer_list_lock);
1062 d_fnend(3, dev, "(wa %p) = void\n", wa);
1063}
1064EXPORT_SYMBOL_GPL(wa_urb_enqueue_run);
1065
1066/*
1067 * Submit a transfer to the Wire Adapter in a delayed way
1068 *
1069 * The process of enqueuing involves possible sleeps() [see
1070 * enqueue_b(), for the rpipe_get() and the mutex_lock()]. If we are
1071 * in an atomic section, we defer the enqueue_b() call--else we call direct.
1072 *
1073 * @urb: We own a reference to it done by the HCI Linux USB stack that
1074 * will be given up by calling usb_hcd_giveback_urb() or by
1075 * returning error from this function -> ergo we don't have to
1076 * refcount it.
1077 */
1078int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,
1079 struct urb *urb, gfp_t gfp)
1080{
1081 int result;
1082 struct device *dev = &wa->usb_iface->dev;
1083 struct wa_xfer *xfer;
1084 unsigned long my_flags;
1085 unsigned cant_sleep = irqs_disabled() | in_atomic();
1086
1087 d_fnstart(3, dev, "(wa %p ep %p urb %p [%d] gfp 0x%x)\n",
1088 wa, ep, urb, urb->transfer_buffer_length, gfp);
1089
1090 if (urb->transfer_buffer == NULL
1091 && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
1092 && urb->transfer_buffer_length != 0) {
1093 dev_err(dev, "BUG? urb %p: NULL xfer buffer & NODMA\n", urb);
1094 dump_stack();
1095 }
1096
1097 result = -ENOMEM;
1098 xfer = kzalloc(sizeof(*xfer), gfp);
1099 if (xfer == NULL)
1100 goto error_kmalloc;
1101
1102 result = -ENOENT;
1103 if (urb->status != -EINPROGRESS) /* cancelled */
1104 goto error_dequeued; /* before starting? */
1105 wa_xfer_init(xfer);
1106 xfer->wa = wa_get(wa);
1107 xfer->urb = urb;
1108 xfer->gfp = gfp;
1109 xfer->ep = ep;
1110 urb->hcpriv = xfer;
1111 d_printf(2, dev, "xfer %p urb %p pipe 0x%02x [%d bytes] %s %s %s\n",
1112 xfer, urb, urb->pipe, urb->transfer_buffer_length,
1113 urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? "dma" : "nodma",
1114 urb->pipe & USB_DIR_IN ? "inbound" : "outbound",
1115 cant_sleep ? "deferred" : "inline");
1116 if (cant_sleep) {
1117 usb_get_urb(urb);
1118 spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
1119 list_add_tail(&xfer->list_node, &wa->xfer_delayed_list);
1120 spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
1121 queue_work(wusbd, &wa->xfer_work);
1122 } else {
1123 wa_urb_enqueue_b(xfer);
1124 }
1125 d_fnend(3, dev, "(wa %p ep %p urb %p [%d] gfp 0x%x) = 0\n",
1126 wa, ep, urb, urb->transfer_buffer_length, gfp);
1127 return 0;
1128
1129error_dequeued:
1130 kfree(xfer);
1131error_kmalloc:
1132 d_fnend(3, dev, "(wa %p ep %p urb %p [%d] gfp 0x%x) = %d\n",
1133 wa, ep, urb, urb->transfer_buffer_length, gfp, result);
1134 return result;
1135}
1136EXPORT_SYMBOL_GPL(wa_urb_enqueue);
1137
/*
 * Dequeue a URB and make sure uwb_hcd_giveback_urb() [completion
 * handler] is called.
 *
 * Until a transfer goes successfully through wa_urb_enqueue() it
 * needs to be dequeued with completion calling; when stuck in delayed
 * or before wa_xfer_setup() is called, we need to do completion.
 *
 *  not setup  If there is no hcpriv yet, that means that that enqueue
 *             still had no time to set the xfer up. Because
 *             urb->status should be other than -EINPROGRESS,
 *             enqueue() will catch that and bail out.
 *
 * If the transfer has gone through setup, we just need to clean it
 * up. If it has gone through submit(), we have to abort it [with an
 * asynch request] and then make sure we cancel each segment.
 *
 */
int wa_urb_dequeue(struct wahc *wa, struct urb *urb)
{
	struct device *dev = &wa->usb_iface->dev;
	unsigned long flags, flags2;
	struct wa_xfer *xfer;
	struct wa_seg *seg;
	struct wa_rpipe *rpipe;
	unsigned cnt;
	unsigned rpipe_ready = 0;

	d_fnstart(3, dev, "(wa %p, urb %p)\n", wa, urb);

	d_printf(1, dev, "xfer %p urb %p: aborting\n", urb->hcpriv, urb);
	xfer = urb->hcpriv;
	if (xfer == NULL) {
		/* NOthing setup yet enqueue will see urb->status !=
		 * -EINPROGRESS (by hcd layer) and bail out with
		 * error, no need to do completion
		 */
		BUG_ON(urb->status == -EINPROGRESS);
		goto out;
	}
	spin_lock_irqsave(&xfer->lock, flags);
	rpipe = xfer->ep->hcpriv;
	/* Check the delayed list -> if there, release and complete */
	spin_lock_irqsave(&wa->xfer_list_lock, flags2);
	if (!list_empty(&xfer->list_node) && xfer->seg == NULL)
		goto dequeue_delayed;
	spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
	if (xfer->seg == NULL)		/* still hasn't reached */
		goto out_unlock;	/* setup(), enqueue_b() completes */
	/* Ok, the xfer is in flight already, it's been setup and submitted.*/
	__wa_xfer_abort(xfer);
	/* Cancel each segment according to how far it has progressed. */
	for (cnt = 0; cnt < xfer->segs; cnt++) {
		seg = xfer->seg[cnt];
		switch (seg->status) {
		case WA_SEG_NOTREADY:
		case WA_SEG_READY:
			/* Shouldn't be seen once submit() has run. */
			printk(KERN_ERR "xfer %p#%u: dequeue bad state %u\n",
			       xfer, cnt, seg->status);
			WARN_ON(1);
			break;
		case WA_SEG_DELAYED:
			/* Still parked on the rpipe list; just unlink. */
			seg->status = WA_SEG_ABORTED;
			spin_lock_irqsave(&rpipe->seg_lock, flags2);
			list_del(&seg->list_node);
			xfer->segs_done++;
			rpipe_ready = rpipe_avail_inc(rpipe);
			spin_unlock_irqrestore(&rpipe->seg_lock, flags2);
			break;
		case WA_SEG_SUBMITTED:
			/* URB(s) in flight; unlink asynchronously. */
			seg->status = WA_SEG_ABORTED;
			usb_unlink_urb(&seg->urb);
			if (xfer->is_inbound == 0)
				usb_unlink_urb(seg->dto_urb);
			xfer->segs_done++;
			rpipe_ready = rpipe_avail_inc(rpipe);
			break;
		case WA_SEG_PENDING:
			seg->status = WA_SEG_ABORTED;
			xfer->segs_done++;
			rpipe_ready = rpipe_avail_inc(rpipe);
			break;
		case WA_SEG_DTI_PENDING:
			/* IN data phase underway; kill the DTI URB. */
			usb_unlink_urb(wa->dti_urb);
			seg->status = WA_SEG_ABORTED;
			xfer->segs_done++;
			rpipe_ready = rpipe_avail_inc(rpipe);
			break;
		case WA_SEG_DONE:
		case WA_SEG_ERROR:
		case WA_SEG_ABORTED:
			/* already finished one way or another */
			break;
		}
	}
	xfer->result = urb->status;	/* -ENOENT or -ECONNRESET */
	__wa_xfer_is_done(xfer);
	spin_unlock_irqrestore(&xfer->lock, flags);
	wa_xfer_completion(xfer);
	if (rpipe_ready)
		wa_xfer_delayed_run(rpipe);
	d_fnend(3, dev, "(wa %p, urb %p) = 0\n", wa, urb);
	return 0;

out_unlock:
	spin_unlock_irqrestore(&xfer->lock, flags);
out:
	d_fnend(3, dev, "(wa %p, urb %p) = 0\n", wa, urb);
	return 0;

dequeue_delayed:
	/* Never left the delayed list; complete it right here. */
	list_del_init(&xfer->list_node);
	spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
	xfer->result = urb->status;
	spin_unlock_irqrestore(&xfer->lock, flags);
	wa_xfer_giveback(xfer);
	usb_put_urb(urb);		/* we got a ref in enqueue() */
	d_fnend(3, dev, "(wa %p, urb %p) = 0\n", wa, urb);
	return 0;
}
EXPORT_SYMBOL_GPL(wa_urb_dequeue);
1257
1258/*
1259 * Translation from WA status codes (WUSB1.0 Table 8.15) to errno
1260 * codes
1261 *
1262 * Positive errno values are internal inconsistencies and should be
1263 * flagged louder. Negative are to be passed up to the user in the
1264 * normal way.
1265 *
1266 * @status: USB WA status code -- high two bits are stripped.
1267 */
1268static int wa_xfer_status_to_errno(u8 status)
1269{
1270 int errno;
1271 u8 real_status = status;
1272 static int xlat[] = {
1273 [WA_XFER_STATUS_SUCCESS] = 0,
1274 [WA_XFER_STATUS_HALTED] = -EPIPE,
1275 [WA_XFER_STATUS_DATA_BUFFER_ERROR] = -ENOBUFS,
1276 [WA_XFER_STATUS_BABBLE] = -EOVERFLOW,
1277 [WA_XFER_RESERVED] = EINVAL,
1278 [WA_XFER_STATUS_NOT_FOUND] = 0,
1279 [WA_XFER_STATUS_INSUFFICIENT_RESOURCE] = -ENOMEM,
1280 [WA_XFER_STATUS_TRANSACTION_ERROR] = -EILSEQ,
1281 [WA_XFER_STATUS_ABORTED] = -EINTR,
1282 [WA_XFER_STATUS_RPIPE_NOT_READY] = EINVAL,
1283 [WA_XFER_INVALID_FORMAT] = EINVAL,
1284 [WA_XFER_UNEXPECTED_SEGMENT_NUMBER] = EINVAL,
1285 [WA_XFER_STATUS_RPIPE_TYPE_MISMATCH] = EINVAL,
1286 };
1287 status &= 0x3f;
1288
1289 if (status == 0)
1290 return 0;
1291 if (status >= ARRAY_SIZE(xlat)) {
1292 if (printk_ratelimit())
1293 printk(KERN_ERR "%s(): BUG? "
1294 "Unknown WA transfer status 0x%02x\n",
1295 __func__, real_status);
1296 return -EINVAL;
1297 }
1298 errno = xlat[status];
1299 if (unlikely(errno > 0)) {
1300 if (printk_ratelimit())
1301 printk(KERN_ERR "%s(): BUG? "
1302 "Inconsistent WA status: 0x%02x\n",
1303 __func__, real_status);
1304 errno = -errno;
1305 }
1306 return errno;
1307}
1308
/*
 * Process a xfer result completion message
 *
 * inbound transfers: need to schedule a DTI read
 *
 * FIXME: this function needs to be broken up in parts
 */
static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer)
{
	int result;
	struct device *dev = &wa->usb_iface->dev;
	unsigned long flags;
	u8 seg_idx;
	struct wa_seg *seg;
	struct wa_rpipe *rpipe;
	struct wa_xfer_result *xfer_result = wa->xfer_result;
	u8 done = 0;
	u8 usb_status;
	unsigned rpipe_ready = 0;

	d_fnstart(3, dev, "(wa %p xfer %p)\n", wa, xfer);
	spin_lock_irqsave(&xfer->lock, flags);
	/* bit 7 of bTransferSegment is the last-segment flag */
	seg_idx = xfer_result->bTransferSegment & 0x7f;
	if (unlikely(seg_idx >= xfer->segs))
		goto error_bad_seg;
	seg = xfer->seg[seg_idx];
	rpipe = xfer->ep->hcpriv;
	usb_status = xfer_result->bTransferStatus;
	d_printf(2, dev, "xfer %p#%u: bTransferStatus 0x%02x (seg %u)\n",
		 xfer, seg_idx, usb_status, seg->status);
	if (seg->status == WA_SEG_ABORTED
	    || seg->status == WA_SEG_ERROR)	/* already handled */
		goto segment_aborted;
	if (seg->status == WA_SEG_SUBMITTED)	/* ops, got here */
		seg->status = WA_SEG_PENDING;	/* before wa_seg{_dto}_cb() */
	if (seg->status != WA_SEG_PENDING) {
		if (printk_ratelimit())
			dev_err(dev, "xfer %p#%u: Bad segment state %u\n",
				xfer, seg_idx, seg->status);
		seg->status = WA_SEG_PENDING;	/* workaround/"fix" it */
	}
	if (usb_status & 0x80) {
		/* bit 7 set: the WA reports the segment failed */
		seg->result = wa_xfer_status_to_errno(usb_status);
		dev_err(dev, "DTI: xfer %p#%u failed (0x%02x)\n",
			xfer, seg->index, usb_status);
		goto error_complete;
	}
	/* FIXME: we ignore warnings, tally them for stats */
	if (usb_status & 0x40)		/* Warning?... */
		usb_status = 0;		/* ... pass */
	if (xfer->is_inbound) {	/* IN data phase: read to buffer */
		seg->status = WA_SEG_DTI_PENDING;
		BUG_ON(wa->buf_in_urb->status == -EINPROGRESS);
		/* Point the shared buf-in URB at this segment's slice
		 * of the caller's buffer (DMA or virtual). */
		if (xfer->is_dma) {
			wa->buf_in_urb->transfer_dma =
				xfer->urb->transfer_dma
				+ seg_idx * xfer->seg_size;
			wa->buf_in_urb->transfer_flags
				|= URB_NO_TRANSFER_DMA_MAP;
		} else {
			wa->buf_in_urb->transfer_buffer =
				xfer->urb->transfer_buffer
				+ seg_idx * xfer->seg_size;
			wa->buf_in_urb->transfer_flags
				&= ~URB_NO_TRANSFER_DMA_MAP;
		}
		wa->buf_in_urb->transfer_buffer_length =
			le32_to_cpu(xfer_result->dwTransferLength);
		wa->buf_in_urb->context = seg;
		result = usb_submit_urb(wa->buf_in_urb, GFP_ATOMIC);
		if (result < 0)
			goto error_submit_buf_in;
	} else {
		/* OUT data phase, complete it -- */
		seg->status = WA_SEG_DONE;
		seg->result = le32_to_cpu(xfer_result->dwTransferLength);
		xfer->segs_done++;
		rpipe_ready = rpipe_avail_inc(rpipe);
		done = __wa_xfer_is_done(xfer);
	}
	spin_unlock_irqrestore(&xfer->lock, flags);
	if (done)
		wa_xfer_completion(xfer);
	if (rpipe_ready)
		wa_xfer_delayed_run(rpipe);
	d_fnend(3, dev, "(wa %p xfer %p) = void\n", wa, xfer);
	return;


error_submit_buf_in:
	if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
		dev_err(dev, "DTI: URB max acceptable errors "
			"exceeded, resetting device\n");
		wa_reset_all(wa);
	}
	if (printk_ratelimit())
		dev_err(dev, "xfer %p#%u: can't submit DTI data phase: %d\n",
			xfer, seg_idx, result);
	seg->result = result;
error_complete:
	/* Mark the segment failed and finish/abort the whole xfer. */
	seg->status = WA_SEG_ERROR;
	xfer->segs_done++;
	rpipe_ready = rpipe_avail_inc(rpipe);
	__wa_xfer_abort(xfer);
	done = __wa_xfer_is_done(xfer);
	spin_unlock_irqrestore(&xfer->lock, flags);
	if (done)
		wa_xfer_completion(xfer);
	if (rpipe_ready)
		wa_xfer_delayed_run(rpipe);
	d_fnend(3, dev, "(wa %p xfer %p) = void [segment/DTI-submit error]\n",
		wa, xfer);
	return;


error_bad_seg:
	/* Device sent a segment index we never allocated. */
	spin_unlock_irqrestore(&xfer->lock, flags);
	wa_urb_dequeue(wa, xfer->urb);
	if (printk_ratelimit())
		dev_err(dev, "xfer %p#%u: bad segment\n", xfer, seg_idx);
	if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
		dev_err(dev, "DTI: URB max acceptable errors "
			"exceeded, resetting device\n");
		wa_reset_all(wa);
	}
	d_fnend(3, dev, "(wa %p xfer %p) = void [bad seg]\n", wa, xfer);
	return;


segment_aborted:
	/* nothing to do, as the aborter did the completion */
	spin_unlock_irqrestore(&xfer->lock, flags);
	d_fnend(3, dev, "(wa %p xfer %p) = void [segment aborted]\n",
		wa, xfer);
	return;

}
1446
/*
 * Callback for the IN data phase
 *
 * If successful transition state; otherwise, take a note of the
 * error, mark this segment done and try completion.
 *
 * Note we don't access until we are sure that the transfer hasn't
 * been cancelled (ECONNRESET, ENOENT), which could mean that
 * seg->xfer could be already gone.
 */
static void wa_buf_in_cb(struct urb *urb)
{
	struct wa_seg *seg = urb->context;
	struct wa_xfer *xfer = seg->xfer;
	struct wahc *wa;
	struct device *dev;
	struct wa_rpipe *rpipe;
	unsigned rpipe_ready;
	unsigned long flags;
	u8 done = 0;

	d_fnstart(3, NULL, "(urb %p [%d])\n", urb, urb->status);
	switch (urb->status) {
	case 0:
		/* Data landed: mark segment done, complete the xfer if
		 * this was the last outstanding segment. */
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		rpipe = xfer->ep->hcpriv;
		d_printf(2, dev, "xfer %p#%u: data in done (%zu bytes)\n",
			 xfer, seg->index, (size_t)urb->actual_length);
		seg->status = WA_SEG_DONE;
		seg->result = urb->actual_length;
		xfer->segs_done++;
		rpipe_ready = rpipe_avail_inc(rpipe);
		done = __wa_xfer_is_done(xfer);
		spin_unlock_irqrestore(&xfer->lock, flags);
		if (done)
			wa_xfer_completion(xfer);
		if (rpipe_ready)
			wa_xfer_delayed_run(rpipe);
		break;
	case -ECONNRESET:	/* URB unlinked; no need to do anything */
	case -ENOENT:		/* as it was done by the who unlinked us */
		break;
	default:		/* Other errors ... */
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		rpipe = xfer->ep->hcpriv;
		if (printk_ratelimit())
			dev_err(dev, "xfer %p#%u: data in error %d\n",
				xfer, seg->index, urb->status);
		/* NOTE(review): this counts against nep_edc while the
		 * message says "DTO" (and this is the buf-in/DTI path);
		 * possibly dti_edc was intended -- confirm before
		 * changing. */
		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)){
			dev_err(dev, "DTO: URB max acceptable errors "
				"exceeded, resetting device\n");
			wa_reset_all(wa);
		}
		seg->status = WA_SEG_ERROR;
		seg->result = urb->status;
		xfer->segs_done++;
		rpipe_ready = rpipe_avail_inc(rpipe);
		__wa_xfer_abort(xfer);
		done = __wa_xfer_is_done(xfer);
		spin_unlock_irqrestore(&xfer->lock, flags);
		if (done)
			wa_xfer_completion(xfer);
		if (rpipe_ready)
			wa_xfer_delayed_run(rpipe);
	}
	d_fnend(3, NULL, "(urb %p [%d]) = void\n", urb, urb->status);
}
1519
/*
 * Handle an incoming transfer result buffer
 *
 * Given a transfer result buffer, it completes the transfer (possibly
 * scheduling and buffer in read) and then resubmits the DTI URB for a
 * new transfer result read.
 *
 *
 * The xfer_result DTI URB state machine
 *
 * States: OFF | RXR (Read-Xfer-Result) | RBI (Read-Buffer-In)
 *
 * We start in OFF mode, the first xfer_result notification [through
 * wa_handle_notif_xfer()] moves us to RXR by posting the DTI-URB to
 * read.
 *
 * We receive a buffer -- if it is not a xfer_result, we complain and
 * repost the DTI-URB. If it is a xfer_result then do the xfer seg
 * request accounting. If it is an IN segment, we move to RBI and post
 * a BUF-IN-URB to the right buffer. The BUF-IN-URB callback will
 * repost the DTI-URB and move to RXR state. if there was no IN
 * segment, it will repost the DTI-URB.
 *
 * We go back to OFF when we detect a ENOENT or ESHUTDOWN (or too many
 * errors) in the URBs.
 */
static void wa_xfer_result_cb(struct urb *urb)
{
	int result;
	struct wahc *wa = urb->context;
	struct device *dev = &wa->usb_iface->dev;
	struct wa_xfer_result *xfer_result;
	u32 xfer_id;
	struct wa_xfer *xfer;
	u8 usb_status;

	d_fnstart(3, dev, "(%p)\n", wa);
	BUG_ON(wa->dti_urb != urb);
	switch (wa->dti_urb->status) {
	case 0:
		/* We have a xfer result buffer; check it */
		d_printf(2, dev, "DTI: xfer result %d bytes at %p\n",
			 urb->actual_length, urb->transfer_buffer);
		d_dump(3, dev, urb->transfer_buffer, urb->actual_length);
		/* Validate size, header length and notification type
		 * before trusting any field in the buffer. */
		if (wa->dti_urb->actual_length != sizeof(*xfer_result)) {
			dev_err(dev, "DTI Error: xfer result--bad size "
				"xfer result (%d bytes vs %zu needed)\n",
				urb->actual_length, sizeof(*xfer_result));
			break;
		}
		xfer_result = wa->xfer_result;
		if (xfer_result->hdr.bLength != sizeof(*xfer_result)) {
			dev_err(dev, "DTI Error: xfer result--"
				"bad header length %u\n",
				xfer_result->hdr.bLength);
			break;
		}
		if (xfer_result->hdr.bNotifyType != WA_XFER_RESULT) {
			dev_err(dev, "DTI Error: xfer result--"
				"bad header type 0x%02x\n",
				xfer_result->hdr.bNotifyType);
			break;
		}
		usb_status = xfer_result->bTransferStatus & 0x3f;
		if (usb_status == WA_XFER_STATUS_ABORTED
		    || usb_status == WA_XFER_STATUS_NOT_FOUND)
			/* taken care of already */
			break;
		xfer_id = xfer_result->dwTransferID;
		xfer = wa_xfer_get_by_id(wa, xfer_id);
		if (xfer == NULL) {
			/* FIXME: transaction might have been cancelled */
			dev_err(dev, "DTI Error: xfer result--"
				"unknown xfer 0x%08x (status 0x%02x)\n",
				xfer_id, usb_status);
			break;
		}
		wa_xfer_result_chew(wa, xfer);
		wa_xfer_put(xfer);	/* drop the get_by_id() reference */
		break;
	case -ENOENT:	/* (we killed the URB)...so, no broadcast */
	case -ESHUTDOWN:	/* going away! */
		dev_dbg(dev, "DTI: going down! %d\n", urb->status);
		goto out;	/* don't repost: state machine goes OFF */
	default:
		/* Unknown error */
		if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "DTI: URB max acceptable errors "
				"exceeded, resetting device\n");
			wa_reset_all(wa);
			goto out;
		}
		if (printk_ratelimit())
			dev_err(dev, "DTI: URB error %d\n", urb->status);
		break;
	}
	/* Resubmit the DTI URB */
	result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
	if (result < 0) {
		dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
			"resetting\n", result);
		wa_reset_all(wa);
	}
out:
	d_fnend(3, dev, "(%p) = void\n", wa);
	return;
}
1628
1629/*
1630 * Transfer complete notification
1631 *
1632 * Called from the notif.c code. We get a notification on EP2 saying
1633 * that some endpoint has some transfer result data available. We are
1634 * about to read it.
1635 *
1636 * To speed up things, we always have a URB reading the DTI URB; we
1637 * don't really set it up and start it until the first xfer complete
1638 * notification arrives, which is what we do here.
1639 *
1640 * Follow up in wa_xfer_result_cb(), as that's where the whole state
1641 * machine starts.
1642 *
1643 * So here we just initialize the DTI URB for reading transfer result
1644 * notifications and also the buffer-in URB, for reading buffers. Then
1645 * we just submit the DTI URB.
1646 *
1647 * @wa shall be referenced
1648 */
1649void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr)
1650{
1651 int result;
1652 struct device *dev = &wa->usb_iface->dev;
1653 struct wa_notif_xfer *notif_xfer;
1654 const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;
1655
1656 d_fnstart(4, dev, "(%p, %p)\n", wa, notif_hdr);
1657 notif_xfer = container_of(notif_hdr, struct wa_notif_xfer, hdr);
1658 BUG_ON(notif_hdr->bNotifyType != WA_NOTIF_TRANSFER);
1659
1660 if ((0x80 | notif_xfer->bEndpoint) != dti_epd->bEndpointAddress) {
1661 /* FIXME: hardcoded limitation, adapt */
1662 dev_err(dev, "BUG: DTI ep is %u, not %u (hack me)\n",
1663 notif_xfer->bEndpoint, dti_epd->bEndpointAddress);
1664 goto error;
1665 }
1666 if (wa->dti_urb != NULL) /* DTI URB already started */
1667 goto out;
1668
1669 wa->dti_urb = usb_alloc_urb(0, GFP_KERNEL);
1670 if (wa->dti_urb == NULL) {
1671 dev_err(dev, "Can't allocate DTI URB\n");
1672 goto error_dti_urb_alloc;
1673 }
1674 usb_fill_bulk_urb(
1675 wa->dti_urb, wa->usb_dev,
1676 usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
1677 wa->xfer_result, wa->xfer_result_size,
1678 wa_xfer_result_cb, wa);
1679
1680 wa->buf_in_urb = usb_alloc_urb(0, GFP_KERNEL);
1681 if (wa->buf_in_urb == NULL) {
1682 dev_err(dev, "Can't allocate BUF-IN URB\n");
1683 goto error_buf_in_urb_alloc;
1684 }
1685 usb_fill_bulk_urb(
1686 wa->buf_in_urb, wa->usb_dev,
1687 usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
1688 NULL, 0, wa_buf_in_cb, wa);
1689 result = usb_submit_urb(wa->dti_urb, GFP_KERNEL);
1690 if (result < 0) {
1691 dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
1692 "resetting\n", result);
1693 goto error_dti_urb_submit;
1694 }
1695out:
1696 d_fnend(4, dev, "(%p, %p) = void\n", wa, notif_hdr);
1697 return;
1698
1699error_dti_urb_submit:
1700 usb_put_urb(wa->buf_in_urb);
1701error_buf_in_urb_alloc:
1702 usb_put_urb(wa->dti_urb);
1703 wa->dti_urb = NULL;
1704error_dti_urb_alloc:
1705error:
1706 wa_reset_all(wa);
1707 d_fnend(4, dev, "(%p, %p) = void\n", wa, notif_hdr);
1708 return;
1709}
diff --git a/drivers/usb/wusbcore/wusbhc.c b/drivers/usb/wusbcore/wusbhc.c
new file mode 100644
index 000000000000..07c63a31c799
--- /dev/null
+++ b/drivers/usb/wusbcore/wusbhc.c
@@ -0,0 +1,418 @@
1/*
2 * Wireless USB Host Controller
3 * sysfs glue, wusbcore module support and life cycle management
4 *
5 *
6 * Copyright (C) 2005-2006 Intel Corporation
7 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License version
11 * 2 as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21 * 02110-1301, USA.
22 *
23 *
 24 * Creation/destruction of wusbhc is split in two parts: the part that
 25 * doesn't require the HCD to be added (wusbhc_{create,destroy}) and
 26 * the part that requires it (phase B, wusbhc_b_{create,destroy}).
 27 *
 28 * This is so because usb_add_hcd() will start the HC, and thus, all
 29 * the HC specific stuff has to be already initialized (like sysfs
 30 * thingies).
 31 */
32#include <linux/device.h>
33#include <linux/module.h>
34#include "wusbhc.h"
35
/**
 * Map a USB Host Controller class device back to its wusbhc.
 *
 * WARNING! Only valid when @dev is a wusbhc.usb_hcd.self->class_dev;
 * the drvdata of such a device is the usb_bus embedded in the
 * usb_hcd the wusbhc wraps. Applying it to anything else misbehaves.
 */
static struct wusbhc *usbhc_dev_to_wusbhc(struct device *dev)
{
	struct usb_bus *bus = dev_get_drvdata(dev);

	return usb_hcd_to_wusbhc(bus_to_hcd(bus));
}
48
49/*
50 * Show & store the current WUSB trust timeout
51 *
52 * We don't do locking--it is an 'atomic' value.
53 *
54 * The units that we store/show are always MILLISECONDS. However, the
55 * value of trust_timeout is jiffies.
56 */
57static ssize_t wusb_trust_timeout_show(struct device *dev,
58 struct device_attribute *attr, char *buf)
59{
60 struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev);
61
62 return scnprintf(buf, PAGE_SIZE, "%u\n", wusbhc->trust_timeout);
63}
64
65static ssize_t wusb_trust_timeout_store(struct device *dev,
66 struct device_attribute *attr,
67 const char *buf, size_t size)
68{
69 struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev);
70 ssize_t result = -ENOSYS;
71 unsigned trust_timeout;
72
73 result = sscanf(buf, "%u", &trust_timeout);
74 if (result != 1) {
75 result = -EINVAL;
76 goto out;
77 }
78 /* FIXME: maybe we should check for range validity? */
79 wusbhc->trust_timeout = trust_timeout;
80 cancel_delayed_work(&wusbhc->keep_alive_timer);
81 flush_workqueue(wusbd);
82 queue_delayed_work(wusbd, &wusbhc->keep_alive_timer,
83 (trust_timeout * CONFIG_HZ)/1000/2);
84out:
85 return result < 0 ? result : size;
86}
87static DEVICE_ATTR(wusb_trust_timeout, 0644, wusb_trust_timeout_show,
88 wusb_trust_timeout_store);
89
90/*
91 * Show & store the current WUSB CHID
92 */
93static ssize_t wusb_chid_show(struct device *dev,
94 struct device_attribute *attr, char *buf)
95{
96 struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev);
97 ssize_t result = 0;
98
99 if (wusbhc->wuie_host_info != NULL)
100 result += ckhdid_printf(buf, PAGE_SIZE,
101 &wusbhc->wuie_host_info->CHID);
102 return result;
103}
104
105/*
106 * Store a new CHID
107 *
108 * This will (FIXME) trigger many changes.
109 *
110 * - Send an all zeros CHID and it will stop the controller
111 * - Send a non-zero CHID and it will start it
112 * (unless it was started, it will just change the CHID,
113 * diconnecting all devices first).
114 *
115 * So first we scan the MMC we are sent and then we act on it. We
116 * read it in the same format as we print it, an ASCII string of 16
117 * hex bytes.
118 *
119 * See wusbhc_chid_set() for more info.
120 */
121static ssize_t wusb_chid_store(struct device *dev,
122 struct device_attribute *attr,
123 const char *buf, size_t size)
124{
125 struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev);
126 struct wusb_ckhdid chid;
127 ssize_t result;
128
129 result = sscanf(buf,
130 "%02hhx %02hhx %02hhx %02hhx "
131 "%02hhx %02hhx %02hhx %02hhx "
132 "%02hhx %02hhx %02hhx %02hhx "
133 "%02hhx %02hhx %02hhx %02hhx\n",
134 &chid.data[0] , &chid.data[1] ,
135 &chid.data[2] , &chid.data[3] ,
136 &chid.data[4] , &chid.data[5] ,
137 &chid.data[6] , &chid.data[7] ,
138 &chid.data[8] , &chid.data[9] ,
139 &chid.data[10], &chid.data[11],
140 &chid.data[12], &chid.data[13],
141 &chid.data[14], &chid.data[15]);
142 if (result != 16) {
143 dev_err(dev, "Unrecognized CHID (need 16 8-bit hex digits): "
144 "%d\n", (int)result);
145 return -EINVAL;
146 }
147 result = wusbhc_chid_set(wusbhc, &chid);
148 return result < 0 ? result : size;
149}
150static DEVICE_ATTR(wusb_chid, 0644, wusb_chid_show, wusb_chid_store);
151
/* Group all the WUSBHC attributes (registered in wusbhc_b_create()). */
static struct attribute *wusbhc_attrs[] = {
	&dev_attr_wusb_trust_timeout.attr,
	&dev_attr_wusb_chid.attr,
	NULL,
};

static struct attribute_group wusbhc_attr_group = {
	.name = NULL,	/* we want them in the same directory */
	.attrs = wusbhc_attrs,
};
163
/*
 * Create a wusbhc instance
 *
 * NOTEs:
 *
 *  - assumes *wusbhc has been zeroed and wusbhc->usb_hcd has been
 *    initialized but not added.
 *
 *  - fill out ports_max, mmcies_max and mmcie_{add,rm} before calling.
 *
 *  - fill out wusbhc->uwb_rc and refcount it before calling
 *  - fill out the wusbhc->sec_modes array
 *
 * Returns 0 or a negative errno; on error the sub-components already
 * created are torn down in strict reverse order via the goto chain.
 */
int wusbhc_create(struct wusbhc *wusbhc)
{
	int result = 0;

	wusbhc->trust_timeout = WUSB_TRUST_TIMEOUT_MS;
	mutex_init(&wusbhc->mutex);
	result = wusbhc_mmcie_create(wusbhc);
	if (result < 0)
		goto error_mmcie_create;
	result = wusbhc_devconnect_create(wusbhc);
	if (result < 0)
		goto error_devconnect_create;
	result = wusbhc_rh_create(wusbhc);
	if (result < 0)
		goto error_rh_create;
	result = wusbhc_sec_create(wusbhc);
	if (result < 0)
		goto error_sec_create;
	return 0;

error_sec_create:
	wusbhc_rh_destroy(wusbhc);
error_rh_create:
	wusbhc_devconnect_destroy(wusbhc);
error_devconnect_create:
	wusbhc_mmcie_destroy(wusbhc);
error_mmcie_create:
	return result;
}
EXPORT_SYMBOL_GPL(wusbhc_create);
207
208static inline struct kobject *wusbhc_kobj(struct wusbhc *wusbhc)
209{
210 return &wusbhc->usb_hcd.self.controller->kobj;
211}
212
213/*
214 * Phase B of a wusbhc instance creation
215 *
216 * Creates fields that depend on wusbhc->usb_hcd having been
217 * added. This is where we create the sysfs files in
218 * /sys/class/usb_host/usb_hostX/.
219 *
220 * NOTE: Assumes wusbhc->usb_hcd has been already added by the upper
221 * layer (hwahc or whci)
222 */
223int wusbhc_b_create(struct wusbhc *wusbhc)
224{
225 int result = 0;
226 struct device *dev = wusbhc->usb_hcd.self.controller;
227
228 result = sysfs_create_group(wusbhc_kobj(wusbhc), &wusbhc_attr_group);
229 if (result < 0) {
230 dev_err(dev, "Cannot register WUSBHC attributes: %d\n", result);
231 goto error_create_attr_group;
232 }
233
234 result = wusbhc_pal_register(wusbhc);
235 if (result < 0)
236 goto error_pal_register;
237 return 0;
238
239error_pal_register:
240 sysfs_remove_group(wusbhc_kobj(wusbhc), &wusbhc_attr_group);
241error_create_attr_group:
242 return result;
243}
244EXPORT_SYMBOL_GPL(wusbhc_b_create);
245
/*
 * Undo wusbhc_b_create(): unregister the PAL first, then remove the
 * sysfs attribute group -- strict reverse order of creation.
 */
void wusbhc_b_destroy(struct wusbhc *wusbhc)
{
	wusbhc_pal_unregister(wusbhc);
	sysfs_remove_group(wusbhc_kobj(wusbhc), &wusbhc_attr_group);
}
EXPORT_SYMBOL_GPL(wusbhc_b_destroy);
252
/*
 * Tear down a wusbhc created with wusbhc_create(); sub-components
 * are destroyed in strict reverse order of their creation.
 */
void wusbhc_destroy(struct wusbhc *wusbhc)
{
	wusbhc_sec_destroy(wusbhc);
	wusbhc_rh_destroy(wusbhc);
	wusbhc_devconnect_destroy(wusbhc);
	wusbhc_mmcie_destroy(wusbhc);
}
EXPORT_SYMBOL_GPL(wusbhc_destroy);
261
/* Single-threaded workqueue serializing WUSB notifications (see init). */
struct workqueue_struct *wusbd;
EXPORT_SYMBOL_GPL(wusbd);
264
265/*
266 * WUSB Cluster ID allocation map
267 *
268 * Each WUSB bus in a channel is identified with a Cluster Id in the
269 * unauth address space (WUSB1.0[4.3]). We take the range 0xe0 to 0xff
270 * (that's space for 31 WUSB controllers, as 0xff can't be taken). We
271 * start taking from 0xff, 0xfe, 0xfd... (hence the += or -= 0xff).
272 *
273 * For each one we take, we mark it in the bitmap.
274 */
#define CLUSTER_IDS 32
static DECLARE_BITMAP(wusb_cluster_id_table, CLUSTER_IDS);	/* one bit per ID */
static DEFINE_SPINLOCK(wusb_cluster_ids_lock);			/* guards the bitmap */
278
279/*
280 * Get a WUSB Cluster ID
281 *
282 * Need to release with wusb_cluster_id_put() when done w/ it.
283 */
284/* FIXME: coordinate with the choose_addres() from the USB stack */
285/* we want to leave the top of the 128 range for cluster addresses and
286 * the bottom for device addresses (as we map them one on one with
287 * ports). */
288u8 wusb_cluster_id_get(void)
289{
290 u8 id;
291 spin_lock(&wusb_cluster_ids_lock);
292 id = find_first_zero_bit(wusb_cluster_id_table, CLUSTER_IDS);
293 if (id > CLUSTER_IDS) {
294 id = 0;
295 goto out;
296 }
297 set_bit(id, wusb_cluster_id_table);
298 id = (u8) 0xff - id;
299out:
300 spin_unlock(&wusb_cluster_ids_lock);
301 return id;
302
303}
304EXPORT_SYMBOL_GPL(wusb_cluster_id_get);
305
306/*
307 * Release a WUSB Cluster ID
308 *
309 * Obtained it with wusb_cluster_id_get()
310 */
311void wusb_cluster_id_put(u8 id)
312{
313 id = 0xff - id;
314 BUG_ON(id >= CLUSTER_IDS);
315 spin_lock(&wusb_cluster_ids_lock);
316 WARN_ON(!test_bit(id, wusb_cluster_id_table));
317 clear_bit(id, wusb_cluster_id_table);
318 spin_unlock(&wusb_cluster_ids_lock);
319}
320EXPORT_SYMBOL_GPL(wusb_cluster_id_put);
321
322/**
323 * wusbhc_giveback_urb - return an URB to the USB core
324 * @wusbhc: the host controller the URB is from.
325 * @urb: the URB.
326 * @status: the URB's status.
327 *
328 * Return an URB to the USB core doing some additional WUSB specific
329 * processing.
330 *
331 * - After a successful transfer, update the trust timeout timestamp
332 * for the WUSB device.
333 *
334 * - [WUSB] sections 4.13 and 7.5.1 specifies the stop retrasmittion
335 * condition for the WCONNECTACK_IE is that the host has observed
336 * the associated device responding to a control transfer.
337 */
338void wusbhc_giveback_urb(struct wusbhc *wusbhc, struct urb *urb, int status)
339{
340 struct wusb_dev *wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev);
341
342 if (status == 0) {
343 wusb_dev->entry_ts = jiffies;
344
345 /* wusbhc_devconnect_acked() can't be called from from
346 atomic context so defer it to a work queue. */
347 if (!list_empty(&wusb_dev->cack_node))
348 queue_work(wusbd, &wusb_dev->devconnect_acked_work);
349 }
350
351 usb_hcd_giveback_urb(&wusbhc->usb_hcd, urb, status);
352}
353EXPORT_SYMBOL_GPL(wusbhc_giveback_urb);
354
/**
 * wusbhc_reset_all - reset the HC hardware
 * @wusbhc: the host controller to reset.
 *
 * Request a full hardware reset of the chip. This will also reset
 * the radio controller and any other PALs.
 *
 * Delegates to the UWB radio controller attached to this wusbhc.
 */
void wusbhc_reset_all(struct wusbhc *wusbhc)
{
	uwb_rc_reset_all(wusbhc->uwb_rc);
}
EXPORT_SYMBOL_GPL(wusbhc_reset_all);
367
/* Hook into USB device add/remove notifications; wusb_usb_ncb() is
 * declared in wusbhc.h and implemented in the devconnect code. */
static struct notifier_block wusb_usb_notifier = {
	.notifier_call = wusb_usb_ncb,
	.priority = INT_MAX	/* Need to be called first of all */
};
372
/*
 * Module init: bring up crypto, create the serializing workqueue,
 * register for USB device notifications and reserve Cluster ID 0xff
 * (bit 0 of the table, as IDs count down from 0xff).
 */
static int __init wusbcore_init(void)
{
	int result;
	result = wusb_crypto_init();
	if (result < 0)
		goto error_crypto_init;
	/* WQ is singlethread because we need to serialize notifications */
	wusbd = create_singlethread_workqueue("wusbd");
	if (wusbd == NULL) {
		result = -ENOMEM;
		printk(KERN_ERR "WUSB-core: Cannot create wusbd workqueue\n");
		goto error_wusbd_create;
	}
	usb_register_notify(&wusb_usb_notifier);
	bitmap_zero(wusb_cluster_id_table, CLUSTER_IDS);
	set_bit(0, wusb_cluster_id_table);	/* reserve Cluster ID 0xff */
	return 0;

error_wusbd_create:
	wusb_crypto_exit();
error_crypto_init:
	return result;
}
module_init(wusbcore_init);
398
/*
 * Module exit: release the reserved Cluster ID 0xff, warn loudly if
 * any other cluster ID is still allocated (a leak), then unregister
 * the notifier and tear down the workqueue and crypto.
 */
static void __exit wusbcore_exit(void)
{
	clear_bit(0, wusb_cluster_id_table);
	if (!bitmap_empty(wusb_cluster_id_table, CLUSTER_IDS)) {
		char buf[256];
		bitmap_scnprintf(buf, sizeof(buf), wusb_cluster_id_table,
				 CLUSTER_IDS);
		printk(KERN_ERR "BUG: WUSB Cluster IDs not released "
		       "on exit: %s\n", buf);
		WARN_ON(1);
	}
	usb_unregister_notify(&wusb_usb_notifier);
	destroy_workqueue(wusbd);
	wusb_crypto_exit();
}
module_exit(wusbcore_exit);
415
416MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
417MODULE_DESCRIPTION("Wireless USB core");
418MODULE_LICENSE("GPL");
diff --git a/drivers/usb/wusbcore/wusbhc.h b/drivers/usb/wusbcore/wusbhc.h
new file mode 100644
index 000000000000..d0c132434f1b
--- /dev/null
+++ b/drivers/usb/wusbcore/wusbhc.h
@@ -0,0 +1,495 @@
1/*
2 * Wireless USB Host Controller
3 * Common infrastructure for WHCI and HWA WUSB-HC drivers
4 *
5 *
6 * Copyright (C) 2005-2006 Intel Corporation
7 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License version
11 * 2 as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21 * 02110-1301, USA.
22 *
23 *
24 * This driver implements parts common to all Wireless USB Host
25 * Controllers (struct wusbhc, embedding a struct usb_hcd) and is used
26 * by:
27 *
28 * - hwahc: HWA, USB-dongle that implements a Wireless USB host
29 * controller, (Wireless USB 1.0 Host-Wire-Adapter specification).
30 *
31 * - whci: WHCI, a PCI card with a wireless host controller
32 * (Wireless Host Controller Interface 1.0 specification).
33 *
34 * Check out the Design-overview.txt file in the source documentation
35 * for other details on the implementation.
36 *
37 * Main blocks:
38 *
39 * rh Root Hub emulation (part of the HCD glue)
40 *
41 * devconnect Handle all the issues related to device connection,
 42 * authentication, disconnection, timeout, resetting,
43 * keepalives, etc.
44 *
45 * mmc MMC IE broadcasting handling
46 *
47 * A host controller driver just initializes its stuff and as part of
48 * that, creates a 'struct wusbhc' instance that handles all the
49 * common WUSB mechanisms. Links in the function ops that are specific
50 * to it and then registers the host controller. Ready to run.
51 */
52
53#ifndef __WUSBHC_H__
54#define __WUSBHC_H__
55
56#include <linux/usb.h>
57#include <linux/list.h>
58#include <linux/mutex.h>
59#include <linux/kref.h>
60#include <linux/workqueue.h>
61/* FIXME: Yes, I know: BAD--it's not my fault the USB HC iface is not
62 * public */
63#include <linux/../../drivers/usb/core/hcd.h>
64#include <linux/uwb.h>
65#include <linux/usb/wusb.h>
66
67
/**
 * Wireless USB device
 *
 * Describe a WUSB device connected to the cluster. This struct
 * belongs to the 'struct wusb_port' it is attached to and it is
 * responsible for putting and clearing the pointer to it.
 *
 * Note this "complements" the 'struct usb_device' that the usb_hcd
 * keeps for each connected USB device. However, it extends some
 * information that is not available (there is no hcpriv ptr in it!)
 * *and* most importantly, its life cycle is different. It is created
 * as soon as we get a DN_Connect (connect request notification) from
 * the device through the WUSB host controller; the USB stack doesn't
 * create the device until we authenticate it. FIXME: this will
 * change.
 *
 * @bos:             This is allocated when the BOS descriptors are read from
 *                   the device and freed upon the wusb_dev struct dying.
 * @wusb_cap_descr:  points into @bos, and has been verified to be size
 *                   safe.
 */
struct wusb_dev {
	struct kref refcnt;		/* see wusb_dev_{get,put}() */
	struct wusbhc *wusbhc;
	struct list_head cack_node;	/* Connect-Ack list */
	u8 port_idx;
	u8 addr;
	u8 beacon_type:4;
	struct usb_encryption_descriptor ccm1_etd;
	struct wusb_ckhdid cdid;
	unsigned long entry_ts;		/* jiffies of last activity */
	struct usb_bos_descriptor *bos;
	struct usb_wireless_cap_descriptor *wusb_cap_descr;
	struct uwb_mas_bm availability;
	struct work_struct devconnect_acked_work;
	struct urb *set_gtk_urb;
	struct usb_ctrlrequest *set_gtk_req;
	struct usb_device *usb_dev;
};
107
108#define WUSB_DEV_ADDR_UNAUTH 0x80
109
/* Initialize a freshly allocated wusb_dev; refcount starts at 1. */
static inline void wusb_dev_init(struct wusb_dev *wusb_dev)
{
	kref_init(&wusb_dev->refcnt);
	/* no need to init the cack_node */
}
115
116extern void wusb_dev_destroy(struct kref *_wusb_dev);
117
/* Take a reference on @wusb_dev; returns it for call chaining. */
static inline struct wusb_dev *wusb_dev_get(struct wusb_dev *wusb_dev)
{
	kref_get(&wusb_dev->refcnt);
	return wusb_dev;
}
123
/* Drop a reference; wusb_dev_destroy() runs on the last put. */
static inline void wusb_dev_put(struct wusb_dev *wusb_dev)
{
	kref_put(&wusb_dev->refcnt, wusb_dev_destroy);
}
128
/**
 * Wireless USB Host Controller root hub "fake" ports
 * (state and device information)
 *
 * Wireless USB is wireless, so there are no ports; but we
 * fake'em. Each RC can connect a max of devices at the same time
 * (given in the Wireless Adapter descriptor, bNumPorts or WHCI's
 * caps), referred to in wusbhc->ports_max.
 *
 * See rh.c for more information.
 *
 * The @status and @change use the same bits as in USB2.0[11.24.2.7],
 * so we don't have to do much when getting the port's status.
 *
 * WUSB1.0[7.1], USB2.0[11.24.2.7.1,fig 11-10],
 * include/linux/usb_ch9.h (#define USB_PORT_STAT_*)
 */
struct wusb_port {
	u16 status;			/* USB_PORT_STAT_* bits */
	u16 change;			/* matching *_C_* change bits */
	struct wusb_dev *wusb_dev;	/* connected device's info */
	unsigned reset_count;
	u32 ptk_tkid;
};
153
154/**
155 * WUSB Host Controller specifics
156 *
157 * All fields that are common to all Wireless USB controller types
158 * (HWA and WHCI) are grouped here. Host Controller
159 * functions/operations that only deal with general Wireless USB HC
160 * issues use this data type to refer to the host.
161 *
162 * @usb_hcd Instantiation of a USB host controller
163 * (initialized by upper layer [HWA=HC or WHCI].
164 *
165 * @dev Device that implements this; initialized by the
166 * upper layer (HWA-HC, WHCI...); this device should
167 * have a refcount.
168 *
169 * @trust_timeout After this time without hearing for device
170 * activity, we consider the device gone and we have to
171 * re-authenticate.
172 *
173 * Can be accessed w/o locking--however, read to a
174 * local variable then use.
175 *
176 * @chid WUSB Cluster Host ID: this is supposed to be a
177 * unique value that doesn't change across reboots (so
178 * that your devices do not require re-association).
179 *
180 * Read/Write protected by @mutex
181 *
182 * @dev_info This array has ports_max elements. It is used to
183 * give the HC information about the WUSB devices (see
184 * 'struct wusb_dev_info').
185 *
186 * For HWA we need to allocate it in heap; for WHCI it
187 * needs to be permanently mapped, so we keep it for
188 * both and make it easy. Call wusbhc->dev_info_set()
189 * to update an entry.
190 *
191 * @ports_max Number of simultaneous device connections (fake
192 * ports) this HC will take. Read-only.
193 *
194 * @port Array of port status for each fake root port. Guaranteed to
195 *                 always be the same length during device existence
196 * [this allows for some unlocked but referenced reading].
197 *
198 * @mmcies_max Max number of Information Elements this HC can send
199 * in its MMC. Read-only.
200 *
201 * @mmcie_add HC specific operation (WHCI or HWA) for adding an
202 * MMCIE.
203 *
204 * @mmcie_rm HC specific operation (WHCI or HWA) for removing an
205 * MMCIE.
206 *
207 * @enc_types Array which describes the encryptions methods
208 * supported by the host as described in WUSB1.0 --
209 * one entry per supported method. As of WUSB1.0 there
210 * is only four methods, we make space for eight just in
211 * case they decide to add some more (and pray they do
212 * it in sequential order). if 'enc_types[enc_method]
213 * != 0', then it is supported by the host. enc_method
214 * is USB_ENC_TYPE*.
215 *
216 * @set_ptk: Set the PTK and enable encryption for a device. Or, if
217 * the supplied key is NULL, disable encryption for that
218 * device.
219 *
220 * @set_gtk: Set the GTK to be used for all future broadcast packets
221 * (i.e., MMCs). With some hardware, setting the GTK may start
222 * MMC transmission.
223 *
224 * NOTE:
225 *
226 * - If wusb_dev->usb_dev is not NULL, then usb_dev is valid
227 * (wusb_dev has a refcount on it). Likewise, if usb_dev->wusb_dev
228 * is not NULL, usb_dev->wusb_dev is valid (usb_dev keeps a
229 * refcount on it).
230 *
231 * Most of the times when you need to use it, it will be non-NULL,
232 * so there is no real need to check for it (wusb_dev will
233 *   disappear before usb_dev).
234 *
235 * - The following fields need to be filled out before calling
236 * wusbhc_create(): ports_max, mmcies_max, mmcie_{add,rm}.
237 *
238 * - there is no wusbhc_init() method, we do everything in
239 * wusbhc_create().
240 *
241 * - Creation is done in two phases, wusbhc_create() and
242 * wusbhc_create_b(); b are the parts that need to be called after
243 * calling usb_hcd_add(&wusbhc->usb_hcd).
244 */
/* See the long kernel-doc block above for per-field details. */
struct wusbhc {
	struct usb_hcd usb_hcd;		/* HAS TO BE 1st */
	struct device *dev;
	struct uwb_rc *uwb_rc;		/* refcounted by the upper layer */
	struct uwb_pal pal;

	unsigned trust_timeout;			/* in jiffies */
	struct wuie_host_info *wuie_host_info;	/* Includes CHID */

	struct mutex mutex;			/* locks everything else */
	u16 cluster_id;				/* Wireless USB Cluster ID */
	struct wusb_port *port;			/* Fake port status handling */
	struct wusb_dev_info *dev_info;		/* for Set Device Info mgmt */
	u8 ports_max;
	unsigned active:1;			/* currently xmit'ing MMCs */
	struct wuie_keep_alive keep_alive_ie;	/* protected by mutex */
	struct delayed_work keep_alive_timer;
	struct list_head cack_list;		/* Connect acknowledging */
	size_t cack_count;			/* protected by 'mutex' */
	struct wuie_connect_ack cack_ie;
	struct uwb_rsv *rsv;		/* cluster bandwidth reservation */

	struct mutex mmcie_mutex;	/* MMC WUIE handling */
	struct wuie_hdr **mmcie;	/* WUIE array */
	u8 mmcies_max;
	/* FIXME: make wusbhc_ops? */
	/* HC-flavour specific operations, filled in by HWA/WHCI glue. */
	int (*start)(struct wusbhc *wusbhc);
	void (*stop)(struct wusbhc *wusbhc);
	int (*mmcie_add)(struct wusbhc *wusbhc, u8 interval, u8 repeat_cnt,
			 u8 handle, struct wuie_hdr *wuie);
	int (*mmcie_rm)(struct wusbhc *wusbhc, u8 handle);
	int (*dev_info_set)(struct wusbhc *, struct wusb_dev *wusb_dev);
	int (*bwa_set)(struct wusbhc *wusbhc, s8 stream_index,
		       const struct uwb_mas_bm *);
	int (*set_ptk)(struct wusbhc *wusbhc, u8 port_idx,
		       u32 tkid, const void *key, size_t key_size);
	int (*set_gtk)(struct wusbhc *wusbhc,
		       u32 tkid, const void *key, size_t key_size);
	int (*set_num_dnts)(struct wusbhc *wusbhc, u8 interval, u8 slots);

	struct {
		struct usb_key_descriptor descr;
		u8 data[16];			/* GTK key data */
	} __attribute__((packed)) gtk;
	u8 gtk_index;
	u32 gtk_tkid;
	struct work_struct gtk_rekey_done_work;
	int pending_set_gtks;

	struct usb_encryption_descriptor *ccm1_etd;
};
296
297#define usb_hcd_to_wusbhc(u) container_of((u), struct wusbhc, usb_hcd)
298
299
300extern int wusbhc_create(struct wusbhc *);
301extern int wusbhc_b_create(struct wusbhc *);
302extern void wusbhc_b_destroy(struct wusbhc *);
303extern void wusbhc_destroy(struct wusbhc *);
304extern int wusb_dev_sysfs_add(struct wusbhc *, struct usb_device *,
305 struct wusb_dev *);
306extern void wusb_dev_sysfs_rm(struct wusb_dev *);
307extern int wusbhc_sec_create(struct wusbhc *);
308extern int wusbhc_sec_start(struct wusbhc *);
309extern void wusbhc_sec_stop(struct wusbhc *);
310extern void wusbhc_sec_destroy(struct wusbhc *);
311extern void wusbhc_giveback_urb(struct wusbhc *wusbhc, struct urb *urb,
312 int status);
313void wusbhc_reset_all(struct wusbhc *wusbhc);
314
315int wusbhc_pal_register(struct wusbhc *wusbhc);
316void wusbhc_pal_unregister(struct wusbhc *wusbhc);
317
318/*
319 * Return @usb_dev's @usb_hcd (properly referenced) or NULL if gone
320 *
321 * @usb_dev: USB device, UNLOCKED and referenced (or otherwise, safe ptr)
322 *
323 * This is a safe assumption as @usb_dev->bus is referenced all the
324 * time during the @usb_dev life cycle.
325 */
326static inline struct usb_hcd *usb_hcd_get_by_usb_dev(struct usb_device *usb_dev)
327{
328 struct usb_hcd *usb_hcd;
329 usb_hcd = container_of(usb_dev->bus, struct usb_hcd, self);
330 return usb_get_hcd(usb_hcd);
331}
332
333/*
334 * Increment the reference count on a wusbhc.
335 *
336 * @wusbhc's life cycle is identical to that of the underlying usb_hcd.
337 */
338static inline struct wusbhc *wusbhc_get(struct wusbhc *wusbhc)
339{
340 return usb_get_hcd(&wusbhc->usb_hcd) ? wusbhc : NULL;
341}
342
343/*
344 * Return the wusbhc associated to a @usb_dev
345 *
346 * @usb_dev: USB device, UNLOCKED and referenced (or otherwise, safe ptr)
347 *
348 * @returns: wusbhc for @usb_dev; NULL if the @usb_dev is being torn down.
349 * WARNING: referenced at the usb_hcd level, unlocked
350 *
351 * FIXME: move offline
352 */
353static inline struct wusbhc *wusbhc_get_by_usb_dev(struct usb_device *usb_dev)
354{
355 struct wusbhc *wusbhc = NULL;
356 struct usb_hcd *usb_hcd;
357 if (usb_dev->devnum > 1 && !usb_dev->wusb) {
358 /* but root hubs */
359 dev_err(&usb_dev->dev, "devnum %d wusb %d\n", usb_dev->devnum,
360 usb_dev->wusb);
361 BUG_ON(usb_dev->devnum > 1 && !usb_dev->wusb);
362 }
363 usb_hcd = usb_hcd_get_by_usb_dev(usb_dev);
364 if (usb_hcd == NULL)
365 return NULL;
366 BUG_ON(usb_hcd->wireless == 0);
367 return wusbhc = usb_hcd_to_wusbhc(usb_hcd);
368}
369
370
/* Drop a reference taken with wusbhc_get*() (via the embedded usb_hcd). */
static inline void wusbhc_put(struct wusbhc *wusbhc)
{
	usb_put_hcd(&wusbhc->usb_hcd);
}
375
376int wusbhc_start(struct wusbhc *wusbhc, const struct wusb_ckhdid *chid);
377void wusbhc_stop(struct wusbhc *wusbhc);
378extern int wusbhc_chid_set(struct wusbhc *, const struct wusb_ckhdid *);
379
380/* Device connect handling */
381extern int wusbhc_devconnect_create(struct wusbhc *);
382extern void wusbhc_devconnect_destroy(struct wusbhc *);
383extern int wusbhc_devconnect_start(struct wusbhc *wusbhc,
384 const struct wusb_ckhdid *chid);
385extern void wusbhc_devconnect_stop(struct wusbhc *wusbhc);
386extern int wusbhc_devconnect_auth(struct wusbhc *, u8);
387extern void wusbhc_handle_dn(struct wusbhc *, u8 srcaddr,
388 struct wusb_dn_hdr *dn_hdr, size_t size);
389extern int wusbhc_dev_reset(struct wusbhc *wusbhc, u8 port);
390extern void __wusbhc_dev_disable(struct wusbhc *wusbhc, u8 port);
391extern int wusb_usb_ncb(struct notifier_block *nb, unsigned long val,
392 void *priv);
393extern int wusb_set_dev_addr(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev,
394 u8 addr);
395
396/* Wireless USB fake Root Hub methods */
397extern int wusbhc_rh_create(struct wusbhc *);
398extern void wusbhc_rh_destroy(struct wusbhc *);
399
400extern int wusbhc_rh_status_data(struct usb_hcd *, char *);
401extern int wusbhc_rh_control(struct usb_hcd *, u16, u16, u16, char *, u16);
402extern int wusbhc_rh_suspend(struct usb_hcd *);
403extern int wusbhc_rh_resume(struct usb_hcd *);
404extern int wusbhc_rh_start_port_reset(struct usb_hcd *, unsigned);
405
406/* MMC handling */
407extern int wusbhc_mmcie_create(struct wusbhc *);
408extern void wusbhc_mmcie_destroy(struct wusbhc *);
409extern int wusbhc_mmcie_set(struct wusbhc *, u8 interval, u8 repeat_cnt,
410 struct wuie_hdr *);
411extern void wusbhc_mmcie_rm(struct wusbhc *, struct wuie_hdr *);
412
413/* Bandwidth reservation */
414int wusbhc_rsv_establish(struct wusbhc *wusbhc);
415void wusbhc_rsv_terminate(struct wusbhc *wusbhc);
416
417/*
418 * I've always said
419 * I wanted a wedding in a church...
420 *
421 * but lately I've been thinking about
422 * the Botanical Gardens.
423 *
424 * We could do it by the tulips.
425 * It'll be beautiful
426 *
427 * --Security!
428 */
429extern int wusb_dev_sec_add(struct wusbhc *, struct usb_device *,
430 struct wusb_dev *);
431extern void wusb_dev_sec_rm(struct wusb_dev *) ;
432extern int wusb_dev_4way_handshake(struct wusbhc *, struct wusb_dev *,
433 struct wusb_ckhdid *ck);
434void wusbhc_gtk_rekey(struct wusbhc *wusbhc);
435
436
437/* WUSB Cluster ID handling */
438extern u8 wusb_cluster_id_get(void);
439extern void wusb_cluster_id_put(u8);
440
441/*
442 * wusb_port_by_idx - return the port associated to a zero-based port index
443 *
444 * NOTE: valid without locking as long as wusbhc is referenced (as the
445 * number of ports doesn't change). The data pointed to has to
446 * be verified though :)
447 */
448static inline struct wusb_port *wusb_port_by_idx(struct wusbhc *wusbhc,
449 u8 port_idx)
450{
451 return &wusbhc->port[port_idx];
452}
453
454/*
455 * wusb_port_no_to_idx - Convert port number (per usb_dev->portnum) to
456 * a port_idx.
457 *
458 * USB stack USB ports are 1 based!!
459 *
460 * NOTE: only valid for WUSB devices!!!
461 */
462static inline u8 wusb_port_no_to_idx(u8 port_no)
463{
464 return port_no - 1;
465}
466
467extern struct wusb_dev *__wusb_dev_get_by_usb_dev(struct wusbhc *,
468 struct usb_device *);
469
/*
 * Return a referenced wusb_dev given a @usb_dev
 *
 * Returns NULL if the usb_dev is being torn down.
 *
 * The lookup runs under wusbhc->mutex; the temporary wusbhc
 * reference taken for the lookup is dropped before returning.
 *
 * FIXME: move offline
 */
static inline
struct wusb_dev *wusb_dev_get_by_usb_dev(struct usb_device *usb_dev)
{
	struct wusbhc *wusbhc;
	struct wusb_dev *wusb_dev;
	wusbhc = wusbhc_get_by_usb_dev(usb_dev);
	if (wusbhc == NULL)
		return NULL;
	mutex_lock(&wusbhc->mutex);
	wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, usb_dev);
	mutex_unlock(&wusbhc->mutex);
	wusbhc_put(wusbhc);
	return wusb_dev;
}
491
492/* Misc */
493
494extern struct workqueue_struct *wusbd;
495#endif /* #ifndef __WUSBHC_H__ */
diff --git a/drivers/uwb/Kconfig b/drivers/uwb/Kconfig
new file mode 100644
index 000000000000..ca783127af36
--- /dev/null
+++ b/drivers/uwb/Kconfig
@@ -0,0 +1,90 @@
1#
2# UWB device configuration
3#
4
5menuconfig UWB
6 tristate "Ultra Wideband devices (EXPERIMENTAL)"
7 depends on EXPERIMENTAL
8 depends on PCI
9 default n
10 help
11 UWB is a high-bandwidth, low-power, point-to-point radio
12 technology using a wide spectrum (3.1-10.6GHz). It is
13 optimized for in-room use (480Mbps at 2 meters, 110Mbps at
14 10m). It serves as the transport layer for other protocols,
15 such as Wireless USB (WUSB), IP (WLP) and upcoming
16 Bluetooth and 1394
17
18 The topology is peer to peer; however, higher level
19 protocols (such as WUSB) might impose a master/slave
20 relationship.
21
22 Say Y here if your computer has a UWB radio controller (USB or
23 PCI based). You will need to enable the radio controllers
24 below. It is ok to select all of them, no harm done.
25
26 For more help check the UWB and WUSB related files in
27 <file:Documentation/usb/>.
28
29 To compile the UWB stack as a module, choose M here.
30
31if UWB
32
33config UWB_HWA
34 tristate "UWB Radio Control driver for WUSB-compliant USB dongles (HWA)"
35 depends on USB
36 help
37 This driver enables the radio controller for HWA USB
38 devices. HWA stands for Host Wire Adapter, and it is a UWB
39 Radio Controller connected to your system via USB. Most of
40 them come with a Wireless USB host controller also.
41
42 To compile this driver select Y (built in) or M (module). It
43 is safe to select any even if you do not have the hardware.
44
45config UWB_WHCI
46 tristate "UWB Radio Control driver for WHCI-compliant cards"
47 depends on PCI
48 help
49 This driver enables the radio controller for WHCI cards.
50
51 WHCI is a specification developed by Intel
52 (http://www.intel.com/technology/comms/wusb/whci.htm) much
53 in the spirit of USB's EHCI, but for UWB and Wireless USB
54 radio/host controllers connected via memory mapping (eg:
55 PCI). Most of these cards come also with a Wireless USB host
56 controller.
57
58 To compile this driver select Y (built in) or M (module). It
59 is safe to select any even if you do not have the hardware.
60
61config UWB_WLP
62 tristate "Support WiMedia Link Protocol (Ethernet/IP over UWB)"
63 depends on UWB && NET
64 help
65 This is a common library for drivers that implement
66 networking over UWB.
67
68config UWB_I1480U
69 tristate "Support for Intel Wireless UWB Link 1480 HWA"
70 depends on UWB_HWA
71 select FW_LOADER
72 help
73 This driver enables support for the i1480 when connected via
74 USB. It consists of a firmware uploader that will enable it
75 to behave as an HWA device.
76
77 To compile this driver select Y (built in) or M (module). It
78 is safe to select any even if you do not have the hardware.
79
80config UWB_I1480U_WLP
81 tristate "Support for Intel Wireless UWB Link 1480 HWA's WLP interface"
82 depends on UWB_I1480U && UWB_WLP && NET
83 help
84 This driver enables WLP support for the i1480 when connected via
85 USB. WLP is the WiMedia Link Protocol, or IP over UWB.
86
87 To compile this driver select Y (built in) or M (module). It
88 is safe to select any even if you don't have the hardware.
89
90endif # UWB
diff --git a/drivers/uwb/Makefile b/drivers/uwb/Makefile
new file mode 100644
index 000000000000..257e6908304c
--- /dev/null
+++ b/drivers/uwb/Makefile
@@ -0,0 +1,29 @@
# Makefile for the UWB stack: the core (uwb.o), the UMC bus and WHCI
# radio controller (umc.o, whci.o, whc-rc.o), the HWA radio controller
# (hwa-rc.o), and the WLP and i1480 subdirectories.
1obj-$(CONFIG_UWB) += uwb.o
2obj-$(CONFIG_UWB_WLP) += wlp/
3obj-$(CONFIG_UWB_WHCI) += umc.o whci.o whc-rc.o
4obj-$(CONFIG_UWB_HWA) += hwa-rc.o
5obj-$(CONFIG_UWB_I1480U) += i1480/
6
7uwb-objs := \
8 address.o \
9 beacon.o \
10 driver.o \
11 drp.o \
12 drp-avail.o \
13 drp-ie.o \
14 est.o \
15 ie.o \
16 lc-dev.o \
17 lc-rc.o \
18 neh.o \
19 pal.o \
20 reset.o \
21 rsv.o \
22 scan.o \
23 uwb-debug.o \
24 uwbd.o
25
26umc-objs := \
27 umc-bus.o \
28 umc-dev.o \
29 umc-drv.o
diff --git a/drivers/uwb/address.c b/drivers/uwb/address.c
new file mode 100644
index 000000000000..1664ae5f1706
--- /dev/null
+++ b/drivers/uwb/address.c
@@ -0,0 +1,374 @@
1/*
2 * Ultra Wide Band
3 * Address management
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * FIXME: docs
24 */
25
26#include <linux/errno.h>
27#include <linux/module.h>
28#include <linux/device.h>
29#include <linux/random.h>
30#include <linux/etherdevice.h>
31#include <linux/uwb/debug.h>
32#include "uwb-internal.h"
33
34
35/** Device Address Management command (see WUSB1.0[8.6.2.2]) */
36struct uwb_rc_cmd_dev_addr_mgmt {
37 struct uwb_rccb rccb;
	/* bit 0: set (1) / get (0); bit 1: MAC (1) / device (0) address */
38 u8 bmOperationType;
	/* 2 bytes used for a DevAddr, all 6 for an EUI-48 MAC address */
39 u8 baAddr[6];
40} __attribute__((packed));
41
42
43/**
44 * Low level command for setting/getting UWB radio's addresses
45 *
 * @rc: UWB Radio Controller instance
47 * @bmOperationType:
48 * Set/get, MAC/DEV (see WUSB1.0[8.6.2.2])
49 * @baAddr: address buffer--assumed to have enough data to hold
50 * the address type requested.
51 * @reply: Pointer to reply buffer (can be stack allocated)
52 * @returns: 0 if ok, < 0 errno code on error.
53 *
 * @cmd has to be dynamically allocated because the USB stack cannot
 * use stack or vmalloc buffers, depending on your combination of
 * host architecture.
56 */
57static
58int uwb_rc_dev_addr_mgmt(struct uwb_rc *rc,
59 u8 bmOperationType, const u8 *baAddr,
60 struct uwb_rc_evt_dev_addr_mgmt *reply)
61{
62 int result;
63 struct uwb_rc_cmd_dev_addr_mgmt *cmd;
64
65 result = -ENOMEM;
66 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
67 if (cmd == NULL)
68 goto error_kzalloc;
69 cmd->rccb.bCommandType = UWB_RC_CET_GENERAL;
70 cmd->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_DEV_ADDR_MGMT);
71 cmd->bmOperationType = bmOperationType;
72 if (baAddr) {
73 size_t size = 0;
	/* bit 1 of bmOperationType selects DevAddr (2 bytes) vs EUI-48 (6) */
74 switch (bmOperationType >> 1) {
75 case 0: size = 2; break;
76 case 1: size = 6; break;
77 default: BUG();
78 }
79 memcpy(cmd->baAddr, baAddr, size);
80 }
81 reply->rceb.bEventType = UWB_RC_CET_GENERAL;
82 reply->rceb.wEvent = UWB_RC_CMD_DEV_ADDR_MGMT;
83 result = uwb_rc_cmd(rc, "DEV-ADDR-MGMT",
84 &cmd->rccb, sizeof(*cmd),
85 &reply->rceb, sizeof(*reply));
86 if (result < 0)
87 goto error_cmd;
	/* result >= 0 here, so the comparison against sizeof() is safe */
88 if (result < sizeof(*reply)) {
89 dev_err(&rc->uwb_dev.dev,
90 "DEV-ADDR-MGMT: not enough data replied: "
91 "%d vs %zu bytes needed\n", result, sizeof(*reply));
92 result = -ENOMSG;
93 } else if (reply->bResultCode != UWB_RC_RES_SUCCESS) {
94 dev_err(&rc->uwb_dev.dev,
95 "DEV-ADDR-MGMT: command execution failed: %s (%d)\n",
96 uwb_rc_strerror(reply->bResultCode),
97 reply->bResultCode);
98 result = -EIO;
99 } else
100 result = 0;
101error_cmd:
102 kfree(cmd);
103error_kzalloc:
104 return result;
105}
106
107
108/**
109 * Set the UWB RC MAC or device address.
110 *
111 * @rc: UWB Radio Controller
112 * @_addr: Pointer to address to write [assumed to be either a
113 * 'struct uwb_mac_addr *' or a 'struct uwb_dev_addr *'].
114 * @type: Type of address to set (UWB_ADDR_DEV or UWB_ADDR_MAC).
115 * @returns: 0 if ok, < 0 errno code on error.
116 *
 * Note: even though both 'struct uwb_{dev,mac}_addr' have the actual
 * byte array at the same offset and _addr could be passed straight to
 * uwb_rc_dev_addr_mgmt(), the explicit switch below is kept as
 * syntactic sugar in case the format of the structs ever changes.
 * The compiler will optimize it out anyway.
122 */
123static int uwb_rc_addr_set(struct uwb_rc *rc,
124 const void *_addr, enum uwb_addr_type type)
125{
126 int result;
127 u8 bmOperationType = 0x1; /* Set address */
128 const struct uwb_dev_addr *dev_addr = _addr;
129 const struct uwb_mac_addr *mac_addr = _addr;
130 struct uwb_rc_evt_dev_addr_mgmt reply;
131 const u8 *baAddr;
132
133 result = -EINVAL;
134 switch (type) {
135 case UWB_ADDR_DEV:
136 baAddr = dev_addr->data;
137 break;
138 case UWB_ADDR_MAC:
139 baAddr = mac_addr->data;
140 bmOperationType |= 0x2;
141 break;
142 default:
143 return result;
144 }
145 return uwb_rc_dev_addr_mgmt(rc, bmOperationType, baAddr, &reply);
146}
147
148
149/**
150 * Get the UWB radio's MAC or device address.
151 *
152 * @rc: UWB Radio Controller
153 * @_addr: Where to write the address data [assumed to be either a
154 * 'struct uwb_mac_addr *' or a 'struct uwb_dev_addr *'].
155 * @type: Type of address to get (UWB_ADDR_DEV or UWB_ADDR_MAC).
156 * @returns: 0 if ok (and *_addr set), < 0 errno code on error.
157 *
 * See the comment in uwb_rc_addr_set() about the explicit
 * type handling of the address variables.
160 */
161static int uwb_rc_addr_get(struct uwb_rc *rc,
162 void *_addr, enum uwb_addr_type type)
163{
164 int result;
165 u8 bmOperationType = 0x0; /* Get address */
166 struct uwb_rc_evt_dev_addr_mgmt evt;
167 struct uwb_dev_addr *dev_addr = _addr;
168 struct uwb_mac_addr *mac_addr = _addr;
169 u8 *baAddr;
170
171 result = -EINVAL;
172 switch (type) {
173 case UWB_ADDR_DEV:
174 baAddr = dev_addr->data;
175 break;
176 case UWB_ADDR_MAC:
177 bmOperationType |= 0x2;
178 baAddr = mac_addr->data;
179 break;
180 default:
181 return result;
182 }
183 result = uwb_rc_dev_addr_mgmt(rc, bmOperationType, baAddr, &evt);
	/* on success, copy the returned address into the caller's buffer */
184 if (result == 0)
185 switch (type) {
186 case UWB_ADDR_DEV:
187 memcpy(&dev_addr->data, evt.baAddr,
188 sizeof(dev_addr->data));
189 break;
190 case UWB_ADDR_MAC:
191 memcpy(&mac_addr->data, evt.baAddr,
192 sizeof(mac_addr->data));
193 break;
194 default: /* shut gcc up */
195 BUG();
196 }
197 return result;
198}
199
200
201/** Get @rc's MAC address to @addr */
202int uwb_rc_mac_addr_get(struct uwb_rc *rc,
203 struct uwb_mac_addr *addr) {
204 return uwb_rc_addr_get(rc, addr, UWB_ADDR_MAC);
205}
206EXPORT_SYMBOL_GPL(uwb_rc_mac_addr_get);
207
208
209/** Get @rc's device address to @addr */
210int uwb_rc_dev_addr_get(struct uwb_rc *rc,
211 struct uwb_dev_addr *addr) {
212 return uwb_rc_addr_get(rc, addr, UWB_ADDR_DEV);
213}
214EXPORT_SYMBOL_GPL(uwb_rc_dev_addr_get);
215
216
217/** Set @rc's MAC address to @addr (under the uwb_dev mutex) */
218int uwb_rc_mac_addr_set(struct uwb_rc *rc,
219 const struct uwb_mac_addr *addr)
220{
221 int result = -EINVAL;
222 mutex_lock(&rc->uwb_dev.mutex);
223 result = uwb_rc_addr_set(rc, addr, UWB_ADDR_MAC);
224 mutex_unlock(&rc->uwb_dev.mutex);
225 return result;
226}
227
228
229/** Set @rc's device address to @addr (under the uwb_dev mutex) */
 /*
 * NOTE(review): unlike uwb_rc_mac_addr_set(), this also caches the new
 * address in rc->uwb_dev.dev_addr -- and does so even when the command
 * failed, while the MAC variant leaves caching to its caller (see
 * uwb_rc_mac_addr_store()).  Confirm this asymmetry is intentional.
 */
230int uwb_rc_dev_addr_set(struct uwb_rc *rc,
231 const struct uwb_dev_addr *addr)
232{
233 int result = -EINVAL;
234 mutex_lock(&rc->uwb_dev.mutex);
235 result = uwb_rc_addr_set(rc, addr, UWB_ADDR_DEV);
236 rc->uwb_dev.dev_addr = *addr;
237 mutex_unlock(&rc->uwb_dev.mutex);
238 return result;
239}
240
241/* Returns !0 if given address is already assigned to device. */
 /* @_addr is really a 'struct uwb_mac_addr *'; presumably used as a
  * per-device iterator callback -- TODO confirm against callers. */
242int __uwb_mac_addr_assigned_check(struct device *dev, void *_addr)
243{
244 struct uwb_dev *uwb_dev = to_uwb_dev(dev);
245 struct uwb_mac_addr *addr = _addr;
246
247 if (!uwb_mac_addr_cmp(addr, &uwb_dev->mac_addr))
248 return !0;
249 return 0;
250}
251
252/* Returns !0 if given address is already assigned to device. */
 /* @_addr is really a 'struct uwb_dev_addr *'; see
  * __uwb_dev_addr_assigned() use in uwb_rc_dev_addr_assign(). */
253int __uwb_dev_addr_assigned_check(struct device *dev, void *_addr)
254{
255 struct uwb_dev *uwb_dev = to_uwb_dev(dev);
256 struct uwb_dev_addr *addr = _addr;
257 if (!uwb_dev_addr_cmp(addr, &uwb_dev->dev_addr))
258 return !0;
259 return 0;
260}
261
262/**
263 * uwb_dev_addr_assign - assigned a generated DevAddr to a radio controller
264 * @rc: the (local) radio controller device requiring a new DevAddr
265 *
266 * A new DevAddr is required when:
267 * - first setting up a radio controller
268 * - if the hardware reports a DevAddr conflict
269 *
270 * The DevAddr is randomly generated in the generated DevAddr range
271 * [0x100, 0xfeff]. The number of devices in a beacon group is limited
272 * by mMaxBPLength (96) so this address space will never be exhausted.
273 *
274 * [ECMA-368] 17.1.1, 17.16.
275 */
276int uwb_rc_dev_addr_assign(struct uwb_rc *rc)
277{
278 struct uwb_dev_addr new_addr;
279
	/* retry until the address is inside the generated range (the
	 * 0x00/0xff byte checks exclude the reserved ends) and not
	 * already in use by a known device */
280 do {
281 get_random_bytes(new_addr.data, sizeof(new_addr.data));
282 } while (new_addr.data[0] == 0x00 || new_addr.data[0] == 0xff
283 || __uwb_dev_addr_assigned(rc, &new_addr));
284
285 return uwb_rc_dev_addr_set(rc, &new_addr);
286}
287
288/**
289 * uwbd_evt_handle_rc_dev_addr_conflict - handle a DEV_ADDR_CONFLICT event
290 * @evt: the DEV_ADDR_CONFLICT notification from the radio controller
291 *
292 * A new (non-conflicting) DevAddr is assigned to the radio controller.
293 *
294 * [ECMA-368] 17.1.1.1.
295 */
296int uwbd_evt_handle_rc_dev_addr_conflict(struct uwb_event *evt)
297{
298 struct uwb_rc *rc = evt->rc;
299
	/* pick a fresh random DevAddr and program it into the hardware */
300 return uwb_rc_dev_addr_assign(rc);
301}
302
303/*
304 * Print the 48-bit EUI MAC address of the radio controller when
305 * reading /sys/class/uwb_rc/XX/mac_address
306 */
307static ssize_t uwb_rc_mac_addr_show(struct device *dev,
308 struct device_attribute *attr, char *buf)
309{
310 struct uwb_dev *uwb_dev = to_uwb_dev(dev);
311 struct uwb_rc *rc = uwb_dev->rc;
312 struct uwb_mac_addr addr;
313 ssize_t result;
314
315 mutex_lock(&rc->uwb_dev.mutex);
316 result = uwb_rc_addr_get(rc, &addr, UWB_ADDR_MAC);
317 mutex_unlock(&rc->uwb_dev.mutex);
318 if (result >= 0) {
319 result = uwb_mac_addr_print(buf, UWB_ADDR_STRSIZE, &addr);
		/* sysfs reads don't need a trailing NUL, just the length */
320 buf[result++] = '\n';
321 }
322 return result;
323}
324
325/*
326 * Parse a 48 bit address written to /sys/class/uwb_rc/XX/mac_address
327 * and if correct, set it.
328 *
 * Rejects malformed input and multicast EUI-48s; on success the new
 * address is also cached in rc->uwb_dev.mac_addr and, per sysfs
 * convention, the full @size is returned.
329 */
330static ssize_t uwb_rc_mac_addr_store(struct device *dev,
331 struct device_attribute *attr,
332 const char *buf, size_t size)
333{
334 struct uwb_dev *uwb_dev = to_uwb_dev(dev);
335 struct uwb_rc *rc = uwb_dev->rc;
336 struct uwb_mac_addr addr;
337 ssize_t result;
338
339 result = sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx\n",
340 &addr.data[0], &addr.data[1], &addr.data[2],
341 &addr.data[3], &addr.data[4], &addr.data[5]);
342 if (result != 6) {
343 result = -EINVAL;
344 goto out;
345 }
346 if (is_multicast_ether_addr(addr.data)) {
347 dev_err(&rc->uwb_dev.dev, "refusing to set multicast "
348 "MAC address %s\n", buf);
349 result = -EINVAL;
350 goto out;
351 }
352 result = uwb_rc_mac_addr_set(rc, &addr);
353 if (result == 0)
354 rc->uwb_dev.mac_addr = addr;
355out:
356 return result < 0 ? result : size;
357}
358DEVICE_ATTR(mac_address, S_IRUGO | S_IWUSR, uwb_rc_mac_addr_show, uwb_rc_mac_addr_store);
358
359/** Print @addr to @buf, @return bytes written */
 /*
 * @type: non-zero prints a 6-byte EUI-48 ("aa:bb:cc:dd:ee:ff");
 *	zero prints a 2-byte DevAddr, byte-swapped ("addr[1]:addr[0]").
 * Output is bounded by @buf_size via scnprintf().
 */
360size_t __uwb_addr_print(char *buf, size_t buf_size, const unsigned char *addr,
361 int type)
362{
363 size_t result;
364 if (type)
365 result = scnprintf(buf, buf_size,
366 "%02x:%02x:%02x:%02x:%02x:%02x",
367 addr[0], addr[1], addr[2],
368 addr[3], addr[4], addr[5]);
369 else
370 result = scnprintf(buf, buf_size, "%02x:%02x",
371 addr[1], addr[0]);
372 return result;
373}
374EXPORT_SYMBOL_GPL(__uwb_addr_print);
diff --git a/drivers/uwb/beacon.c b/drivers/uwb/beacon.c
new file mode 100644
index 000000000000..46b18eec5026
--- /dev/null
+++ b/drivers/uwb/beacon.c
@@ -0,0 +1,642 @@
1/*
2 * Ultra Wide Band
3 * Beacon management
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * FIXME: docs
24 */
25
26#include <linux/kernel.h>
27#include <linux/init.h>
28#include <linux/module.h>
29#include <linux/device.h>
30#include <linux/err.h>
31#include <linux/kdev_t.h>
32#include "uwb-internal.h"
33
34#define D_LOCAL 0
35#include <linux/uwb/debug.h>
36
37/** Start Beaconing command structure */
38struct uwb_rc_cmd_start_beacon {
39 struct uwb_rccb rccb;
40 __le16 wBPSTOffset;
41 u8 bChannelNumber;
42} __attribute__((packed));
43
44
 /*
 * Issue the START-BEACON command to the radio controller.
 * Returns 0 on success, < 0 errno on failure (-EIO if the RC rejected
 * the command).  The command buffer is heap-allocated; the reply can
 * live on the stack.
 */
45static int uwb_rc_start_beacon(struct uwb_rc *rc, u16 bpst_offset, u8 channel)
46{
47 int result;
48 struct uwb_rc_cmd_start_beacon *cmd;
49 struct uwb_rc_evt_confirm reply;
50
51 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
52 if (cmd == NULL)
53 return -ENOMEM;
54 cmd->rccb.bCommandType = UWB_RC_CET_GENERAL;
55 cmd->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_START_BEACON);
56 cmd->wBPSTOffset = cpu_to_le16(bpst_offset);
57 cmd->bChannelNumber = channel;
58 reply.rceb.bEventType = UWB_RC_CET_GENERAL;
59 reply.rceb.wEvent = UWB_RC_CMD_START_BEACON;
60 result = uwb_rc_cmd(rc, "START-BEACON", &cmd->rccb, sizeof(*cmd),
61 &reply.rceb, sizeof(reply));
62 if (result < 0)
63 goto error_cmd;
64 if (reply.bResultCode != UWB_RC_RES_SUCCESS) {
65 dev_err(&rc->uwb_dev.dev,
66 "START-BEACON: command execution failed: %s (%d)\n",
67 uwb_rc_strerror(reply.bResultCode), reply.bResultCode);
68 result = -EIO;
69 }
70error_cmd:
71 kfree(cmd);
72 return result;
73}
74
 /*
 * Issue the STOP-BEACON command to the radio controller.
 * Returns 0 on success, < 0 errno on failure (-EIO if the RC rejected
 * the command).  Mirrors uwb_rc_start_beacon() but takes no arguments.
 */
75static int uwb_rc_stop_beacon(struct uwb_rc *rc)
76{
77 int result;
78 struct uwb_rccb *cmd;
79 struct uwb_rc_evt_confirm reply;
80
81 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
82 if (cmd == NULL)
83 return -ENOMEM;
84 cmd->bCommandType = UWB_RC_CET_GENERAL;
85 cmd->wCommand = cpu_to_le16(UWB_RC_CMD_STOP_BEACON);
86 reply.rceb.bEventType = UWB_RC_CET_GENERAL;
87 reply.rceb.wEvent = UWB_RC_CMD_STOP_BEACON;
88 result = uwb_rc_cmd(rc, "STOP-BEACON", cmd, sizeof(*cmd),
89 &reply.rceb, sizeof(reply));
90 if (result < 0)
91 goto error_cmd;
92 if (reply.bResultCode != UWB_RC_RES_SUCCESS) {
93 dev_err(&rc->uwb_dev.dev,
94 "STOP-BEACON: command execution failed: %s (%d)\n",
95 uwb_rc_strerror(reply.bResultCode), reply.bResultCode);
96 result = -EIO;
97 }
98error_cmd:
99 kfree(cmd);
100 return result;
101}
102
103/*
104 * Start/stop beacons
105 *
106 * @rc: UWB Radio Controller to operate on
107 * @channel: UWB channel on which to beacon (WUSB[table
108 * 5-12]). If -1, stop beaconing.
109 * @bpst_offset: Beacon Period Start Time offset; FIXME-do zero
110 *
111 * According to WHCI 0.95 [4.13.6] the driver will only receive the RCEB
112 * of a SET IE command after the device sent the first beacon that includes
113 * the IEs specified in the SET IE command. So, after we start beaconing we
114 * check if there is anything in the IE cache and call the SET IE command
115 * if needed.
116 */
117int uwb_rc_beacon(struct uwb_rc *rc, int channel, unsigned bpst_offset)
118{
119 int result;
120 struct device *dev = &rc->uwb_dev.dev;
121
122 mutex_lock(&rc->uwb_dev.mutex);
	/* any negative channel is normalized to -1 == "stop beaconing" */
123 if (channel < 0)
124 channel = -1;
125 if (channel == -1)
126 result = uwb_rc_stop_beacon(rc);
127 else {
128 /* channel >= 0...dah */
129 result = uwb_rc_start_beacon(rc, bpst_offset, channel);
130 if (result < 0)
131 goto out_up;
132 if (le16_to_cpu(rc->ies->wIELength) > 0) {
133 result = uwb_rc_set_ie(rc, rc->ies);
134 if (result < 0) {
135 dev_err(dev, "Cannot set new IE on device: "
136 "%d\n", result);
				/* roll back: stop beaconing; if the stop
				 * succeeds, rc->beaconing becomes -1 below */
137 result = uwb_rc_stop_beacon(rc);
138 channel = -1;
139 bpst_offset = 0;
140 } else
141 result = 0;
142 }
143 }
144
145 if (result < 0)
146 goto out_up;
147 rc->beaconing = channel;
148
149 uwb_notify(rc, NULL, uwb_bg_joined(rc) ? UWB_NOTIF_BG_JOIN : UWB_NOTIF_BG_LEAVE);
150
151out_up:
152 mutex_unlock(&rc->uwb_dev.mutex);
153 return result;
154}
155
156/*
157 * Beacon cache
158 *
 * The purpose of this is to speed up the lookup of beacon information
160 * when a new beacon arrives. The UWB Daemon uses it also to keep a
161 * tab of which devices are in radio distance and which not. When a
162 * device's beacon stays present for more than a certain amount of
163 * time, it is considered a new, usable device. When a beacon ceases
164 * to be received for a certain amount of time, it is considered that
165 * the device is gone.
166 *
167 * FIXME: use an allocator for the entries
168 * FIXME: use something faster for search than a list
169 */
170
 /* Global beacon cache: a mutex-protected list of uwb_beca_e entries */
171struct uwb_beca uwb_beca = {
172 .list = LIST_HEAD_INIT(uwb_beca.list),
173 .mutex = __MUTEX_INITIALIZER(uwb_beca.mutex)
174};
175
176
 /* kref release callback: frees the entry and its cached beacon event */
177void uwb_bce_kfree(struct kref *_bce)
178{
179 struct uwb_beca_e *bce = container_of(_bce, struct uwb_beca_e, refcnt);
180
181 kfree(bce->be);
182 kfree(bce);
183}
184
185
186/* Find a beacon by dev addr in the cache */
 /* Callers hold uwb_beca.mutex (see uwb_dev_get_by_devaddr()). */
187static
188struct uwb_beca_e *__uwb_beca_find_bydev(const struct uwb_dev_addr *dev_addr)
189{
190 struct uwb_beca_e *bce, *next;
191 list_for_each_entry_safe(bce, next, &uwb_beca.list, node) {
192 d_printf(6, NULL, "looking for addr %02x:%02x in %02x:%02x\n",
193 dev_addr->data[0], dev_addr->data[1],
194 bce->dev_addr.data[0], bce->dev_addr.data[1]);
195 if (!memcmp(&bce->dev_addr, dev_addr, sizeof(bce->dev_addr)))
196 goto out;
197 }
198 bce = NULL;
199out:
200 return bce;
201}
202
 /* Find a beacon by MAC address (EUI-48) in the cache */
 /* Callers hold uwb_beca.mutex (see uwb_dev_get_by_macaddr()). */
204static
205struct uwb_beca_e *__uwb_beca_find_bymac(const struct uwb_mac_addr *mac_addr)
206{
207 struct uwb_beca_e *bce, *next;
208 list_for_each_entry_safe(bce, next, &uwb_beca.list, node) {
209 if (!memcmp(bce->mac_addr, mac_addr->data,
210 sizeof(struct uwb_mac_addr)))
211 goto out;
212 }
213 bce = NULL;
214out:
215 return bce;
216}
217
218/**
219 * uwb_dev_get_by_devaddr - get a UWB device with a specific DevAddr
220 * @rc: the radio controller that saw the device
221 * @devaddr: DevAddr of the UWB device to find
222 *
223 * There may be more than one matching device (in the case of a
224 * DevAddr conflict), but only the first one is returned.
225 */
226struct uwb_dev *uwb_dev_get_by_devaddr(struct uwb_rc *rc,
227 const struct uwb_dev_addr *devaddr)
228{
229 struct uwb_dev *found = NULL;
230 struct uwb_beca_e *bce;
231
232 mutex_lock(&uwb_beca.mutex);
233 bce = __uwb_beca_find_bydev(devaddr);
234 if (bce)
235 found = uwb_dev_try_get(rc, bce->uwb_dev);
236 mutex_unlock(&uwb_beca.mutex);
237
238 return found;
239}
240
241/**
242 * uwb_dev_get_by_macaddr - get a UWB device with a specific EUI-48
243 * @rc: the radio controller that saw the device
 * @macaddr: EUI-48 of the UWB device to find
245 */
246struct uwb_dev *uwb_dev_get_by_macaddr(struct uwb_rc *rc,
247 const struct uwb_mac_addr *macaddr)
248{
249 struct uwb_dev *found = NULL;
250 struct uwb_beca_e *bce;
251
252 mutex_lock(&uwb_beca.mutex);
253 bce = __uwb_beca_find_bymac(macaddr);
254 if (bce)
255 found = uwb_dev_try_get(rc, bce->uwb_dev);
256 mutex_unlock(&uwb_beca.mutex);
257
258 return found;
259}
260
261/* Initialize a beacon cache entry */
262static void uwb_beca_e_init(struct uwb_beca_e *bce)
263{
264 mutex_init(&bce->mutex);
265 kref_init(&bce->refcnt);
266 stats_init(&bce->lqe_stats);
267 stats_init(&bce->rssi_stats);
268}
269
270/*
271 * Add a beacon to the cache
272 *
273 * @be: Beacon event information
274 * @bf: Beacon frame (part of b, really)
275 * @ts_jiffies: Timestamp (in jiffies) when the beacon was received
 *
 * NOTE(review): @be and @bf are currently unused here; the caller
 * (uwbd_evt_handle_rc_beacon()) fills in dev_addr, mac_addr and be on
 * the returned entry.  Caller holds uwb_beca.mutex for the list_add.
276 */
277struct uwb_beca_e *__uwb_beca_add(struct uwb_rc_evt_beacon *be,
278 struct uwb_beacon_frame *bf,
279 unsigned long ts_jiffies)
280{
281 struct uwb_beca_e *bce;
282
283 bce = kzalloc(sizeof(*bce), GFP_KERNEL);
284 if (bce == NULL)
285 return NULL;
286 uwb_beca_e_init(bce);
287 bce->ts_jiffies = ts_jiffies;
288 bce->uwb_dev = NULL;
289 list_add(&bce->node, &uwb_beca.list);
290 return bce;
291}
292
293/*
294 * Wipe out beacon entries that became stale
295 *
 * Remove associated devices too.
297 */
298void uwb_beca_purge(void)
299{
300 struct uwb_beca_e *bce, *next;
301 unsigned long expires;
302
303 mutex_lock(&uwb_beca.mutex);
304 list_for_each_entry_safe(bce, next, &uwb_beca.list, node) {
		/* expire entries not refreshed within beacon_timeout_ms */
305 expires = bce->ts_jiffies + msecs_to_jiffies(beacon_timeout_ms);
306 if (time_after(jiffies, expires)) {
307 uwbd_dev_offair(bce);
308 list_del(&bce->node);
309 uwb_bce_put(bce);
310 }
311 }
312 mutex_unlock(&uwb_beca.mutex);
313}
314
315/* Clean up the whole beacon cache. Called on shutdown */
316void uwb_beca_release(void)
317{
318 struct uwb_beca_e *bce, *next;
319 mutex_lock(&uwb_beca.mutex);
320 list_for_each_entry_safe(bce, next, &uwb_beca.list, node) {
321 list_del(&bce->node);
322 uwb_bce_put(bce);
323 }
324 mutex_unlock(&uwb_beca.mutex);
325}
326
 /* Log a one-line summary of a received beacon (src/dst/channel/slot/MAC) */
327static void uwb_beacon_print(struct uwb_rc *rc, struct uwb_rc_evt_beacon *be,
328 struct uwb_beacon_frame *bf)
329{
330 char macbuf[UWB_ADDR_STRSIZE];
331 char devbuf[UWB_ADDR_STRSIZE];
332 char dstbuf[UWB_ADDR_STRSIZE];
333
334 uwb_mac_addr_print(macbuf, sizeof(macbuf), &bf->Device_Identifier);
335 uwb_dev_addr_print(devbuf, sizeof(devbuf), &bf->hdr.SrcAddr);
336 uwb_dev_addr_print(dstbuf, sizeof(dstbuf), &bf->hdr.DestAddr);
337 dev_info(&rc->uwb_dev.dev,
338 "BEACON from %s to %s (ch%u offset %u slot %u MAC %s)\n",
339 devbuf, dstbuf, be->bChannelNumber, be->wBPSTOffset,
340 bf->Beacon_Slot_Number, macbuf);
341}
342
343/*
 * Hex-dump the IEs of the entry's cached beacon into @buf.
 *
344 * @bce: beacon cache entry, referenced
 * Returns the number of bytes written, or 0 if no beacon is cached.
345 */
346ssize_t uwb_bce_print_IEs(struct uwb_dev *uwb_dev, struct uwb_beca_e *bce,
347 char *buf, size_t size)
348{
349 ssize_t result = 0;
350 struct uwb_rc_evt_beacon *be;
351 struct uwb_beacon_frame *bf;
352 struct uwb_buf_ctx ctx = {
353 .buf = buf,
354 .bytes = 0,
355 .size = size
356 };
357
358 mutex_lock(&bce->mutex);
359 be = bce->be;
360 if (be == NULL)
361 goto out;
362 bf = (void *) be->BeaconInfo;
363 uwb_ie_for_each(uwb_dev, uwb_ie_dump_hex, &ctx,
364 bf->IEData, be->wBeaconInfoLength - sizeof(*bf));
365 result = ctx.bytes;
366out:
367 mutex_unlock(&bce->mutex);
368 return result;
369}
370
371/*
372 * Verify that the beacon event, frame and IEs are ok
 *
 * Returns 0 if the notification carries at least a full event header
 * plus beacon frame, -EINVAL otherwise.  IE validation is still TODO.
373 */
374static int uwb_verify_beacon(struct uwb_rc *rc, struct uwb_event *evt,
375 struct uwb_rc_evt_beacon *be)
376{
377 int result = -EINVAL;
378 struct uwb_beacon_frame *bf;
379 struct device *dev = &rc->uwb_dev.dev;
380
381 /* Is there enough data to decode a beacon frame? */
382 if (evt->notif.size < sizeof(*be) + sizeof(*bf)) {
383 dev_err(dev, "BEACON event: Not enough data to decode "
384 "(%zu vs %zu bytes needed)\n", evt->notif.size,
385 sizeof(*be) + sizeof(*bf));
386 goto error;
387 }
388 /* FIXME: make sure beacon frame IEs are fine and that the whole thing
389 * is consistent */
390 result = 0;
391error:
392 return result;
393}
394
395/*
396 * Handle UWB_RC_EVT_BEACON events
397 *
398 * We check the beacon cache to see how the received beacon fares. If
399 * is there already we refresh the timestamp. If not we create a new
400 * entry.
401 *
402 * According to the WHCI and WUSB specs, only one beacon frame is
403 * allowed per notification block, so we don't bother about scanning
404 * for more.
 *
 * Returns 1 on success so uwbd keeps the event buffer alive: both
 * bce->be and bce->mac_addr point into it.
405 */
406int uwbd_evt_handle_rc_beacon(struct uwb_event *evt)
407{
408 int result = -EINVAL;
409 struct uwb_rc *rc;
410 struct uwb_rc_evt_beacon *be;
411 struct uwb_beacon_frame *bf;
412 struct uwb_beca_e *bce;
413 unsigned long last_ts;
414
415 rc = evt->rc;
416 be = container_of(evt->notif.rceb, struct uwb_rc_evt_beacon, rceb);
417 result = uwb_verify_beacon(rc, evt, be);
418 if (result < 0)
419 return result;
420
421 /* FIXME: handle alien beacons. */
422 if (be->bBeaconType == UWB_RC_BEACON_TYPE_OL_ALIEN ||
423 be->bBeaconType == UWB_RC_BEACON_TYPE_NOL_ALIEN) {
424 return -ENOSYS;
425 }
426
427 bf = (struct uwb_beacon_frame *) be->BeaconInfo;
428
429 /*
430 * Drop beacons from devices with a NULL EUI-48 -- they cannot
431 * be uniquely identified.
432 *
433 * It's expected that these will all be WUSB devices and they
434 * have a WUSB specific connection method so ignoring them
435 * here shouldn't be a problem.
436 */
437 if (uwb_mac_addr_bcast(&bf->Device_Identifier))
438 return 0;
439
440 mutex_lock(&uwb_beca.mutex);
441 bce = __uwb_beca_find_bymac(&bf->Device_Identifier);
442 if (bce == NULL) {
443 /* Not in there, a new device is pinging */
444 uwb_beacon_print(evt->rc, be, bf);
445 bce = __uwb_beca_add(be, bf, evt->ts_jiffies);
446 if (bce == NULL) {
447 mutex_unlock(&uwb_beca.mutex);
448 return -ENOMEM;
449 }
450 }
451 mutex_unlock(&uwb_beca.mutex);
452
453 mutex_lock(&bce->mutex);
454 /* purge old beacon data */
455 kfree(bce->be);
456
457 last_ts = bce->ts_jiffies;
458
459 /* Update commonly used fields */
460 bce->ts_jiffies = evt->ts_jiffies;
461 bce->be = be;
462 bce->dev_addr = bf->hdr.SrcAddr;
463 bce->mac_addr = &bf->Device_Identifier;
	/* convert the little-endian wire fields to host order in place */
464 be->wBPSTOffset = le16_to_cpu(be->wBPSTOffset);
465 be->wBeaconInfoLength = le16_to_cpu(be->wBeaconInfoLength);
466 stats_add_sample(&bce->lqe_stats, be->bLQI - 7);
467 stats_add_sample(&bce->rssi_stats, be->bRSSI + 18);
468
469 /*
470 * This might be a beacon from a new device.
471 */
472 if (bce->uwb_dev == NULL)
473 uwbd_dev_onair(evt->rc, bce);
474
475 mutex_unlock(&bce->mutex);
476
477 return 1; /* we keep the event data */
478}
479
480/*
481 * Handle UWB_RC_EVT_BEACON_SIZE events
482 *
 * Currently only validates the notification and rate-limits a log
 * message; no action is taken on the new beacon size yet (see FIXMEs).
484 */
485int uwbd_evt_handle_rc_beacon_size(struct uwb_event *evt)
486{
487 int result = -EINVAL;
488 struct device *dev = &evt->rc->uwb_dev.dev;
489 struct uwb_rc_evt_beacon_size *bs;
490
491 /* Is there enough data to decode the event? */
492 if (evt->notif.size < sizeof(*bs)) {
493 dev_err(dev, "BEACON SIZE notification: Not enough data to "
494 "decode (%zu vs %zu bytes needed)\n",
495 evt->notif.size, sizeof(*bs));
496 goto error;
497 }
498 bs = container_of(evt->notif.rceb, struct uwb_rc_evt_beacon_size, rceb);
	/* the if (0) arm is deliberately disabled verbose logging */
499 if (0)
500 dev_info(dev, "Beacon size changed to %u bytes "
501 "(FIXME: action?)\n", le16_to_cpu(bs->wNewBeaconSize));
502 else {
503 /* temporary hack until we do something with this message... */
504 static unsigned count;
505 if (++count % 1000 == 0)
506 dev_info(dev, "Beacon size changed %u times "
507 "(FIXME: action?)\n", count);
508 }
509 result = 0;
510error:
511 return result;
512}
513
514/**
515 * uwbd_evt_handle_rc_bp_slot_change - handle a BP_SLOT_CHANGE event
516 * @evt: the BP_SLOT_CHANGE notification from the radio controller
517 *
518 * If the event indicates that no beacon period slots were available
519 * then radio controller has transitioned to a non-beaconing state.
520 * Otherwise, simply save the current beacon slot.
521 */
522int uwbd_evt_handle_rc_bp_slot_change(struct uwb_event *evt)
523{
524 struct uwb_rc *rc = evt->rc;
525 struct device *dev = &rc->uwb_dev.dev;
526 struct uwb_rc_evt_bp_slot_change *bpsc;
527
528 if (evt->notif.size < sizeof(*bpsc)) {
529 dev_err(dev, "BP SLOT CHANGE event: Not enough data\n");
530 return -EINVAL;
531 }
532 bpsc = container_of(evt->notif.rceb, struct uwb_rc_evt_bp_slot_change, rceb);
533
	/* rc->beaconing and beacon_slot are protected by uwb_dev.mutex */
534 mutex_lock(&rc->uwb_dev.mutex);
535 if (uwb_rc_evt_bp_slot_change_no_slot(bpsc)) {
536 dev_info(dev, "stopped beaconing: No free slots in BP\n");
537 rc->beaconing = -1;
538 } else
539 rc->uwb_dev.beacon_slot = uwb_rc_evt_bp_slot_change_slot_num(bpsc);
540 mutex_unlock(&rc->uwb_dev.mutex);
541
542 return 0;
543}
544
545/**
546 * Handle UWB_RC_EVT_BPOIE_CHANGE events
547 *
 * Validates the notification and its embedded IE length, then only
 * rate-limits a log message; acting on the individual IEs is a FIXME.
549 */
 /* Beacon Period Occupancy IE header as carried in the notification */
550struct uwb_ie_bpo {
551 struct uwb_ie_hdr hdr;
552 u8 bp_length;
553 u8 data[];
554} __attribute__((packed));
555
556int uwbd_evt_handle_rc_bpoie_change(struct uwb_event *evt)
557{
558 int result = -EINVAL;
559 struct device *dev = &evt->rc->uwb_dev.dev;
560 struct uwb_rc_evt_bpoie_change *bpoiec;
561 struct uwb_ie_bpo *bpoie;
562 static unsigned count; /* FIXME: this is a temp hack */
563 size_t iesize;
564
565 /* Is there enough data to decode it? */
566 if (evt->notif.size < sizeof(*bpoiec)) {
567 dev_err(dev, "BPOIEC notification: Not enough data to "
568 "decode (%zu vs %zu bytes needed)\n",
569 evt->notif.size, sizeof(*bpoiec));
570 goto error;
571 }
572 bpoiec = container_of(evt->notif.rceb, struct uwb_rc_evt_bpoie_change, rceb);
573 iesize = le16_to_cpu(bpoiec->wBPOIELength);
574 if (iesize < sizeof(*bpoie)) {
575 dev_err(dev, "BPOIEC notification: Not enough IE data to "
576 "decode (%zu vs %zu bytes needed)\n",
577 iesize, sizeof(*bpoie));
578 goto error;
579 }
580 if (++count % 1000 == 0) /* Lame placeholder */
581 dev_info(dev, "BPOIE: %u changes received\n", count);
582 /*
583 * FIXME: At this point we should go over all the IEs in the
584 * bpoiec->BPOIE array and act on each.
585 */
586 result = 0;
587error:
588 return result;
589}
590
591/**
592 * uwb_bg_joined - is the RC in a beacon group?
593 * @rc: the radio controller
594 *
595 * Returns true if the radio controller is in a beacon group (even if
596 * it's the sole member).
 *
 * rc->beaconing is -1 when not beaconing, else the channel number.
597 */
598int uwb_bg_joined(struct uwb_rc *rc)
599{
600 return rc->beaconing != -1;
601}
602EXPORT_SYMBOL_GPL(uwb_bg_joined);
603
604/*
605 * Print beaconing state.
 *
 * Shows rc->beaconing: the channel being beaconed on, or -1 if not
 * beaconing (sysfs "beacon" attribute, read side).
606 */
607static ssize_t uwb_rc_beacon_show(struct device *dev,
608 struct device_attribute *attr, char *buf)
609{
610 struct uwb_dev *uwb_dev = to_uwb_dev(dev);
611 struct uwb_rc *rc = uwb_dev->rc;
612 ssize_t result;
613
614 mutex_lock(&rc->uwb_dev.mutex);
615 result = sprintf(buf, "%d\n", rc->beaconing);
616 mutex_unlock(&rc->uwb_dev.mutex);
617 return result;
618}
619
620/*
621 * Start beaconing on the specified channel, or stop beaconing.
622 *
623 * The BPST offset of when to start searching for a beacon group to
624 * join may be specified.
 *
 * Input format: "<channel> [bpst_offset]"; a negative channel stops
 * beaconing (see uwb_rc_beacon()).
625 */
626static ssize_t uwb_rc_beacon_store(struct device *dev,
627 struct device_attribute *attr,
628 const char *buf, size_t size)
629{
630 struct uwb_dev *uwb_dev = to_uwb_dev(dev);
631 struct uwb_rc *rc = uwb_dev->rc;
632 int channel;
633 unsigned bpst_offset = 0;
634 ssize_t result = -EINVAL;
635
636 result = sscanf(buf, "%d %u\n", &channel, &bpst_offset);
637 if (result >= 1)
638 result = uwb_rc_beacon(rc, channel, bpst_offset);
639
640 return result < 0 ? result : size;
641}
642DEVICE_ATTR(beacon, S_IRUGO | S_IWUSR, uwb_rc_beacon_show, uwb_rc_beacon_store);
diff --git a/drivers/uwb/driver.c b/drivers/uwb/driver.c
new file mode 100644
index 000000000000..521cdeb84971
--- /dev/null
+++ b/drivers/uwb/driver.c
@@ -0,0 +1,144 @@
1/*
2 * Ultra Wide Band
3 * Driver initialization, etc
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * FIXME: docs
24 *
25 * Life cycle: FIXME: explain
26 *
27 * UWB radio controller:
28 *
29 * 1. alloc a uwb_rc, zero it
30 * 2. call uwb_rc_init() on it to set it up + ops (won't do any
31 * kind of allocation)
32 * 3. register (now it is owned by the UWB stack--deregister before
33 * freeing/destroying).
 * 4. It lives on its own now (UWB stack handles)--when it
 *    disconnects, call unregister()
36 * 5. free it.
37 *
38 * Make sure you have a reference to the uwb_rc before calling
39 * any of the UWB API functions.
40 *
41 * TODO:
42 *
43 * 1. Locking and life cycle management is crappy still. All entry
44 * points to the UWB HCD API assume you have a reference on the
45 * uwb_rc structure and that it won't go away. They mutex lock it
46 * before doing anything.
47 */
48
49#include <linux/kernel.h>
50#include <linux/init.h>
51#include <linux/module.h>
52#include <linux/device.h>
53#include <linux/err.h>
54#include <linux/kdev_t.h>
55#include <linux/random.h>
56#include <linux/uwb/debug.h>
57#include "uwb-internal.h"
58
59
60/* UWB stack attributes (or 'global' constants) */
61
62
63/**
 * If a beacon disappears for longer than this, then we consider the
 * device that was represented by that beacon to be gone.
66 *
67 * ECMA-368[17.2.3, last para] establishes that a device must not
 * consider a device to be its neighbour if it doesn't receive a beacon
69 * for more than mMaxLostBeacons. mMaxLostBeacons is defined in
70 * ECMA-368[17.16] as 3; because we can get only one beacon per
71 * superframe, that'd be 3 * 65ms = 195 ~ 200 ms. Let's give it time
72 * for jitter and stuff and make it 500 ms.
73 */
74unsigned long beacon_timeout_ms = 500;
75
76static
77ssize_t beacon_timeout_ms_show(struct class *class, char *buf)
78{
79 return scnprintf(buf, PAGE_SIZE, "%lu\n", beacon_timeout_ms);
80}
81
82static
83ssize_t beacon_timeout_ms_store(struct class *class,
84 const char *buf, size_t size)
85{
86 unsigned long bt;
87 ssize_t result;
88 result = sscanf(buf, "%lu", &bt);
89 if (result != 1)
90 return -EINVAL;
91 beacon_timeout_ms = bt;
92 return size;
93}
94
/* Class-level sysfs attributes, exposed under /sys/class/uwb_rc/. */
static struct class_attribute uwb_class_attrs[] = {
	__ATTR(beacon_timeout_ms, S_IWUSR | S_IRUGO,
	       beacon_timeout_ms_show, beacon_timeout_ms_store),
	__ATTR_NULL,
};

/** Device model classes */
struct class uwb_rc_class = {
	.name = "uwb_rc",
	.class_attrs = uwb_class_attrs,
};
106
107
/*
 * Initialize the UWB core: Event Size Tables first (needed to decode
 * radio controller notifications), then the device-model class, then
 * the event daemon and debug interface.
 */
static int __init uwb_subsys_init(void)
{
	int result = 0;

	result = uwb_est_create();
	if (result < 0) {
		printk(KERN_ERR "uwb: Can't initialize EST subsystem\n");
		goto error_est_init;
	}

	result = class_register(&uwb_rc_class);
	if (result < 0)
		goto error_uwb_rc_class_register;
	/*
	 * NOTE(review): uwbd_start() and uwb_dbg_init() results are not
	 * checked here — confirm they cannot fail or fail harmlessly.
	 */
	uwbd_start();
	uwb_dbg_init();
	return 0;

error_uwb_rc_class_register:
	uwb_est_destroy();
error_est_init:
	return result;
}
module_init(uwb_subsys_init);
131
132static void __exit uwb_subsys_exit(void)
133{
134 uwb_dbg_exit();
135 uwbd_stop();
136 class_unregister(&uwb_rc_class);
137 uwb_est_destroy();
138 return;
139}
140module_exit(uwb_subsys_exit);
141
142MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
143MODULE_DESCRIPTION("Ultra Wide Band core");
144MODULE_LICENSE("GPL");
diff --git a/drivers/uwb/drp-avail.c b/drivers/uwb/drp-avail.c
new file mode 100644
index 000000000000..3febd8552808
--- /dev/null
+++ b/drivers/uwb/drp-avail.c
@@ -0,0 +1,288 @@
1/*
2 * Ultra Wide Band
3 * DRP availability management
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Reinette Chatre <reinette.chatre@intel.com>
7 * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License version
11 * 2 as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program. If not, see <http://www.gnu.org/licenses/>.
20 *
21 *
22 * Manage DRP Availability (the MAS available for DRP
23 * reservations). Thus:
24 *
25 * - Handle DRP Availability Change notifications
26 *
27 * - Allow the reservation manager to indicate MAS reserved/released
28 * by local (owned by/targeted at the radio controller)
29 * reservations.
30 *
31 * - Based on the two sources above, generate a DRP Availability IE to
32 * be included in the beacon.
33 *
34 * See also the documentation for struct uwb_drp_avail.
35 */
36
37#include <linux/errno.h>
38#include <linux/module.h>
39#include <linux/device.h>
40#include <linux/bitmap.h>
41#include "uwb-internal.h"
42
43/**
44 * uwb_drp_avail_init - initialize an RC's MAS availability
45 *
46 * All MAS are available initially. The RC will inform use which
47 * slots are used for the BP (it may change in size).
48 */
49void uwb_drp_avail_init(struct uwb_rc *rc)
50{
51 bitmap_fill(rc->drp_avail.global, UWB_NUM_MAS);
52 bitmap_fill(rc->drp_avail.local, UWB_NUM_MAS);
53 bitmap_fill(rc->drp_avail.pending, UWB_NUM_MAS);
54}
55
56/*
57 * Determine MAS available for new local reservations.
58 *
59 * avail = global & local & pending
60 */
61static void uwb_drp_available(struct uwb_rc *rc, struct uwb_mas_bm *avail)
62{
63 bitmap_and(avail->bm, rc->drp_avail.global, rc->drp_avail.local, UWB_NUM_MAS);
64 bitmap_and(avail->bm, avail->bm, rc->drp_avail.pending, UWB_NUM_MAS);
65}
66
67/**
68 * uwb_drp_avail_reserve_pending - reserve MAS for a new reservation
69 * @rc: the radio controller
70 * @mas: the MAS to reserve
71 *
72 * Returns 0 on success, or -EBUSY if the MAS requested aren't available.
73 */
74int uwb_drp_avail_reserve_pending(struct uwb_rc *rc, struct uwb_mas_bm *mas)
75{
76 struct uwb_mas_bm avail;
77
78 uwb_drp_available(rc, &avail);
79 if (!bitmap_subset(mas->bm, avail.bm, UWB_NUM_MAS))
80 return -EBUSY;
81
82 bitmap_andnot(rc->drp_avail.pending, rc->drp_avail.pending, mas->bm, UWB_NUM_MAS);
83 return 0;
84}
85
86/**
87 * uwb_drp_avail_reserve - reserve MAS for an established reservation
88 * @rc: the radio controller
89 * @mas: the MAS to reserve
90 */
91void uwb_drp_avail_reserve(struct uwb_rc *rc, struct uwb_mas_bm *mas)
92{
93 bitmap_or(rc->drp_avail.pending, rc->drp_avail.pending, mas->bm, UWB_NUM_MAS);
94 bitmap_andnot(rc->drp_avail.local, rc->drp_avail.local, mas->bm, UWB_NUM_MAS);
95 rc->drp_avail.ie_valid = false;
96}
97
98/**
99 * uwb_drp_avail_release - release MAS from a pending or established reservation
100 * @rc: the radio controller
101 * @mas: the MAS to release
102 */
103void uwb_drp_avail_release(struct uwb_rc *rc, struct uwb_mas_bm *mas)
104{
105 bitmap_or(rc->drp_avail.local, rc->drp_avail.local, mas->bm, UWB_NUM_MAS);
106 bitmap_or(rc->drp_avail.pending, rc->drp_avail.pending, mas->bm, UWB_NUM_MAS);
107 rc->drp_avail.ie_valid = false;
108}
109
110/**
111 * uwb_drp_avail_ie_update - update the DRP Availability IE
112 * @rc: the radio controller
113 *
114 * avail = global & local
115 */
116void uwb_drp_avail_ie_update(struct uwb_rc *rc)
117{
118 struct uwb_mas_bm avail;
119
120 bitmap_and(avail.bm, rc->drp_avail.global, rc->drp_avail.local, UWB_NUM_MAS);
121
122 rc->drp_avail.ie.hdr.element_id = UWB_IE_DRP_AVAILABILITY;
123 rc->drp_avail.ie.hdr.length = UWB_NUM_MAS / 8;
124 uwb_mas_bm_copy_le(rc->drp_avail.ie.bmp, &avail);
125 rc->drp_avail.ie_valid = true;
126}
127
128/**
129 * Create an unsigned long from a buffer containing a byte stream.
130 *
131 * @array: pointer to buffer
132 * @itr: index of buffer from where we start
133 * @len: the buffer's remaining size may not be exact multiple of
134 * sizeof(unsigned long), @len is the length of buffer that needs
135 * to be converted. This will be sizeof(unsigned long) or smaller
136 * (BUG if not). If it is smaller then we will pad the remaining
137 * space of the result with zeroes.
138 */
static
unsigned long get_val(u8 *array, size_t itr, size_t len)
{
	unsigned long val = 0;
	size_t i;

	BUG_ON(len > sizeof(val));

	/* Assemble bytes little-endian: array[itr] lands lowest. */
	for (i = len; i > 0; i--) {
		val <<= 8;
		val |= array[itr + i - 1];
	}
	/* Shift a short value into the top of the word so the absent
	 * low bytes read as zero (padding). */
	val <<= 8 * (sizeof(val) - len);
	return val;
}
155
156/**
157 * Initialize bitmap from data buffer.
158 *
159 * The bitmap to be converted could come from a IE, for example a
160 * DRP Availability IE.
161 * From ECMA-368 1.0 [16.8.7]: "
162 * octets: 1 1 N * (0 to 32)
163 * Element ID Length (=N) DRP Availability Bitmap
164 *
165 * The DRP Availability Bitmap field is up to 256 bits long, one
166 * bit for each MAS in the superframe, where the least-significant
167 * bit of the field corresponds to the first MAS in the superframe
168 * and successive bits correspond to successive MASs."
169 *
170 * The DRP Availability bitmap is in octets from 0 to 32, so octet
171 * 32 contains bits for MAS 1-8, etc. If the bitmap is smaller than 32
172 * octets, the bits in octets not included at the end of the bitmap are
173 * treated as zero. In this case (when the bitmap is smaller than 32
174 * octets) the MAS represented range from MAS 1 to MAS (size of bitmap)
175 * with the last octet still containing bits for MAS 1-8, etc.
176 *
177 * For example:
178 * F00F0102 03040506 0708090A 0B0C0D0E 0F010203
179 * ^^^^
180 * ||||
181 * ||||
182 * |||\LSB of byte is MAS 9
183 * ||\MSB of byte is MAS 16
184 * |\LSB of first byte is MAS 1
185 * \ MSB of byte is MAS 8
186 *
187 * An example of this encoding can be found in ECMA-368 Annex-D [Table D.11]
188 *
189 * The resulting bitmap will have the following mapping:
190 * bit position 0 == MAS 1
191 * bit position 1 == MAS 2
192 * ...
193 * bit position (UWB_NUM_MAS - 1) == MAS UWB_NUM_MAS
194 *
195 * @bmp_itr: pointer to bitmap (can be declared with DECLARE_BITMAP)
196 * @buffer: pointer to buffer containing bitmap data in big endian
197 * format (MSB first)
198 * @buffer_size:number of bytes with which bitmap should be initialized
199 */
200static
201void buffer_to_bmp(unsigned long *bmp_itr, void *_buffer,
202 size_t buffer_size)
203{
204 u8 *buffer = _buffer;
205 size_t itr, len;
206 unsigned long val;
207
208 itr = 0;
209 while (itr < buffer_size) {
210 len = buffer_size - itr >= sizeof(val) ?
211 sizeof(val) : buffer_size - itr;
212 val = get_val(buffer, itr, len);
213 bmp_itr[itr / sizeof(val)] = val;
214 itr += sizeof(val);
215 }
216}
217
218
219/**
220 * Extract DRP Availability bitmap from the notification.
221 *
222 * The notification that comes in contains a bitmap of (UWB_NUM_MAS / 8) bytes
223 * We convert that to our internal representation.
224 */
static
int uwbd_evt_get_drp_avail(struct uwb_event *evt, unsigned long *bmp)
{
	struct device *dev = &evt->rc->uwb_dev.dev;
	struct uwb_rc_evt_drp_avail *drp_evt;
	int result = -EINVAL;

	/* Is there enough data to decode the event? */
	if (evt->notif.size < sizeof(*drp_evt)) {
		dev_err(dev, "DRP Availability Change: Not enough "
			"data to decode event [%zu bytes, %zu "
			"needed]\n", evt->notif.size, sizeof(*drp_evt));
		goto error;
	}
	/* The (UWB_NUM_MAS / 8)-octet wire bitmap follows the RCEB header;
	 * convert it into the native unsigned-long bitmap in @bmp. */
	drp_evt = container_of(evt->notif.rceb, struct uwb_rc_evt_drp_avail, rceb);
	buffer_to_bmp(bmp, drp_evt->bmp, UWB_NUM_MAS/8);
	result = 0;
error:
	return result;
}
245
246
247/**
248 * Process an incoming DRP Availability notification.
249 *
250 * @evt: Event information (packs the actual event data, which
251 * radio controller it came to, etc).
252 *
253 * @returns: 0 on success (so uwbd() frees the event buffer), < 0
254 * on error.
255 *
256 * According to ECMA-368 1.0 [16.8.7], bits set to ONE indicate that
257 * the MAS slot is available, bits set to ZERO indicate that the slot
258 * is busy.
259 *
260 * So we clear available slots, we set used slots :)
261 *
262 * The notification only marks non-availability based on the BP and
263 * received DRP IEs that are not for this radio controller. A copy of
264 * this bitmap is needed to generate the real availability (which
265 * includes local and pending reservations).
266 *
267 * The DRP Availability IE that this radio controller emits will need
268 * to be updated.
269 */
int uwbd_evt_handle_rc_drp_avail(struct uwb_event *evt)
{
	int result;
	struct uwb_rc *rc = evt->rc;
	DECLARE_BITMAP(bmp, UWB_NUM_MAS);

	result = uwbd_evt_get_drp_avail(evt, bmp);
	if (result < 0)
		return result;

	/*
	 * Replace the global availability view wholesale and mark the
	 * advertised DRP Availability IE stale, all under rsvs_mutex.
	 */
	mutex_lock(&rc->rsvs_mutex);
	bitmap_copy(rc->drp_avail.global, bmp, UWB_NUM_MAS);
	rc->drp_avail.ie_valid = false;
	mutex_unlock(&rc->rsvs_mutex);

	/* Availability changed — presumably schedules a reservation
	 * re-evaluation; confirm against uwb_rsv_sched_update(). */
	uwb_rsv_sched_update(rc);

	return 0;
}
diff --git a/drivers/uwb/drp-ie.c b/drivers/uwb/drp-ie.c
new file mode 100644
index 000000000000..882724c5f126
--- /dev/null
+++ b/drivers/uwb/drp-ie.c
@@ -0,0 +1,232 @@
1/*
2 * UWB DRP IE management.
3 *
4 * Copyright (C) 2005-2006 Intel Corporation
5 * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19#include <linux/version.h>
20#include <linux/kernel.h>
21#include <linux/random.h>
22#include <linux/uwb.h>
23
24#include "uwb-internal.h"
25
26/*
27 * Allocate a DRP IE.
28 *
29 * To save having to free/allocate a DRP IE when its MAS changes,
 * enough memory is allocated for the maximum number of DRP
31 * allocation fields. This gives an overhead per reservation of up to
32 * (UWB_NUM_ZONES - 1) * 4 = 60 octets.
33 */
34static struct uwb_ie_drp *uwb_drp_ie_alloc(void)
35{
36 struct uwb_ie_drp *drp_ie;
37 unsigned tiebreaker;
38
39 drp_ie = kzalloc(sizeof(struct uwb_ie_drp) +
40 UWB_NUM_ZONES * sizeof(struct uwb_drp_alloc),
41 GFP_KERNEL);
42 if (drp_ie) {
43 drp_ie->hdr.element_id = UWB_IE_DRP;
44
45 get_random_bytes(&tiebreaker, sizeof(unsigned));
46 uwb_ie_drp_set_tiebreaker(drp_ie, tiebreaker & 1);
47 }
48 return drp_ie;
49}
50
51
52/*
53 * Fill a DRP IE's allocation fields from a MAS bitmap.
54 */
static void uwb_drp_ie_from_bm(struct uwb_ie_drp *drp_ie,
			       struct uwb_mas_bm *mas)
{
	int z, i, num_fields = 0, next = 0;
	struct uwb_drp_alloc *zones;
	__le16 current_bmp;
	DECLARE_BITMAP(tmp_bmp, UWB_NUM_MAS);
	DECLARE_BITMAP(tmp_mas_bm, UWB_MAS_PER_ZONE);

	zones = drp_ie->allocs;

	bitmap_copy(tmp_bmp, mas->bm, UWB_NUM_MAS);

	/* Determine unique MAS bitmaps in zones from bitmap. */
	for (z = 0; z < UWB_NUM_ZONES; z++) {
		/* The low UWB_MAS_PER_ZONE bits of tmp_bmp are zone z's
		 * MAS (shifted into place at the bottom of the loop). */
		bitmap_copy(tmp_mas_bm, tmp_bmp, UWB_MAS_PER_ZONE);
		if (bitmap_weight(tmp_mas_bm, UWB_MAS_PER_ZONE) > 0) {
			bool found = false;
			/*
			 * NOTE(review): this cast takes the low 16 bits of
			 * a CPU-order unsigned long; byte-order conversion
			 * is deferred to the cpu_to_le16() loop below —
			 * confirm the interim __le16 typing is intentional.
			 */
			current_bmp = (__le16) *tmp_mas_bm;
			/* Zones that share an identical MAS pattern are
			 * coalesced into one allocation field. */
			for (i = 0; i < next; i++) {
				if (current_bmp == zones[i].mas_bm) {
					zones[i].zone_bm |= 1 << z;
					found = true;
					break;
				}
			}
			if (!found) {
				num_fields++;
				zones[next].zone_bm = 1 << z;
				zones[next].mas_bm = current_bmp;
				next++;
			}
		}
		/* Bring the next zone's MAS into the low bits. */
		bitmap_shift_right(tmp_bmp, tmp_bmp, UWB_MAS_PER_ZONE, UWB_NUM_MAS);
	}

	/* Store in format ready for transmission (le16). */
	for (i = 0; i < num_fields; i++) {
		drp_ie->allocs[i].zone_bm = cpu_to_le16(zones[i].zone_bm);
		drp_ie->allocs[i].mas_bm = cpu_to_le16(zones[i].mas_bm);
	}

	/* IE length counts everything after the 2-octet IE header. */
	drp_ie->hdr.length = sizeof(struct uwb_ie_drp) - sizeof(struct uwb_ie_hdr)
		+ num_fields * sizeof(struct uwb_drp_alloc);
}
100
101/**
102 * uwb_drp_ie_update - update a reservation's DRP IE
103 * @rsv: the reservation
104 */
105int uwb_drp_ie_update(struct uwb_rsv *rsv)
106{
107 struct device *dev = &rsv->rc->uwb_dev.dev;
108 struct uwb_ie_drp *drp_ie;
109 int reason_code, status;
110
111 switch (rsv->state) {
112 case UWB_RSV_STATE_NONE:
113 kfree(rsv->drp_ie);
114 rsv->drp_ie = NULL;
115 return 0;
116 case UWB_RSV_STATE_O_INITIATED:
117 reason_code = UWB_DRP_REASON_ACCEPTED;
118 status = 0;
119 break;
120 case UWB_RSV_STATE_O_PENDING:
121 reason_code = UWB_DRP_REASON_ACCEPTED;
122 status = 0;
123 break;
124 case UWB_RSV_STATE_O_MODIFIED:
125 reason_code = UWB_DRP_REASON_MODIFIED;
126 status = 1;
127 break;
128 case UWB_RSV_STATE_O_ESTABLISHED:
129 reason_code = UWB_DRP_REASON_ACCEPTED;
130 status = 1;
131 break;
132 case UWB_RSV_STATE_T_ACCEPTED:
133 reason_code = UWB_DRP_REASON_ACCEPTED;
134 status = 1;
135 break;
136 case UWB_RSV_STATE_T_DENIED:
137 reason_code = UWB_DRP_REASON_DENIED;
138 status = 0;
139 break;
140 default:
141 dev_dbg(dev, "rsv with unhandled state (%d)\n", rsv->state);
142 return -EINVAL;
143 }
144
145 if (rsv->drp_ie == NULL) {
146 rsv->drp_ie = uwb_drp_ie_alloc();
147 if (rsv->drp_ie == NULL)
148 return -ENOMEM;
149 }
150 drp_ie = rsv->drp_ie;
151
152 uwb_ie_drp_set_owner(drp_ie, uwb_rsv_is_owner(rsv));
153 uwb_ie_drp_set_status(drp_ie, status);
154 uwb_ie_drp_set_reason_code(drp_ie, reason_code);
155 uwb_ie_drp_set_stream_index(drp_ie, rsv->stream);
156 uwb_ie_drp_set_type(drp_ie, rsv->type);
157
158 if (uwb_rsv_is_owner(rsv)) {
159 switch (rsv->target.type) {
160 case UWB_RSV_TARGET_DEV:
161 drp_ie->dev_addr = rsv->target.dev->dev_addr;
162 break;
163 case UWB_RSV_TARGET_DEVADDR:
164 drp_ie->dev_addr = rsv->target.devaddr;
165 break;
166 }
167 } else
168 drp_ie->dev_addr = rsv->owner->dev_addr;
169
170 uwb_drp_ie_from_bm(drp_ie, &rsv->mas);
171
172 rsv->ie_valid = true;
173 return 0;
174}
175
176/*
177 * Set MAS bits from given MAS bitmap in a single zone of large bitmap.
178 *
179 * We are given a zone id and the MAS bitmap of bits that need to be set in
180 * this zone. Note that this zone may already have bits set and this only
181 * adds settings - we cannot simply assign the MAS bitmap contents to the
 * zone contents. We iterate over the bits (MAS) in the zone and set the
183 * bits that are set in the given MAS bitmap.
184 */
185static
186void uwb_drp_ie_single_zone_to_bm(struct uwb_mas_bm *bm, u8 zone, u16 mas_bm)
187{
188 int mas;
189 u16 mas_mask;
190
191 for (mas = 0; mas < UWB_MAS_PER_ZONE; mas++) {
192 mas_mask = 1 << mas;
193 if (mas_bm & mas_mask)
194 set_bit(zone * UWB_NUM_ZONES + mas, bm->bm);
195 }
196}
197
198/**
199 * uwb_drp_ie_zones_to_bm - convert DRP allocation fields to a bitmap
200 * @mas: MAS bitmap that will be populated to correspond to the
201 * allocation fields in the DRP IE
202 * @drp_ie: the DRP IE that contains the allocation fields.
203 *
204 * The input format is an array of MAS allocation fields (16 bit Zone
205 * bitmap, 16 bit MAS bitmap) as described in [ECMA-368] section
206 * 16.8.6. The output is a full 256 bit MAS bitmap.
207 *
208 * We go over all the allocation fields, for each allocation field we
209 * know which zones are impacted. We iterate over all the zones
210 * impacted and call a function that will set the correct MAS bits in
211 * each zone.
212 */
213void uwb_drp_ie_to_bm(struct uwb_mas_bm *bm, const struct uwb_ie_drp *drp_ie)
214{
215 int numallocs = (drp_ie->hdr.length - 4) / 4;
216 const struct uwb_drp_alloc *alloc;
217 int cnt;
218 u16 zone_bm, mas_bm;
219 u8 zone;
220 u16 zone_mask;
221
222 for (cnt = 0; cnt < numallocs; cnt++) {
223 alloc = &drp_ie->allocs[cnt];
224 zone_bm = le16_to_cpu(alloc->zone_bm);
225 mas_bm = le16_to_cpu(alloc->mas_bm);
226 for (zone = 0; zone < UWB_NUM_ZONES; zone++) {
227 zone_mask = 1 << zone;
228 if (zone_bm & zone_mask)
229 uwb_drp_ie_single_zone_to_bm(bm, zone, mas_bm);
230 }
231 }
232}
diff --git a/drivers/uwb/drp.c b/drivers/uwb/drp.c
new file mode 100644
index 000000000000..c0b1e5e2bd08
--- /dev/null
+++ b/drivers/uwb/drp.c
@@ -0,0 +1,461 @@
1/*
2 * Ultra Wide Band
3 * Dynamic Reservation Protocol handling
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License version
11 * 2 as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program. If not, see <http://www.gnu.org/licenses/>.
20 */
21#include <linux/kthread.h>
22#include <linux/freezer.h>
23#include <linux/delay.h>
24#include "uwb-internal.h"
25
26/**
27 * Construct and send the SET DRP IE
28 *
29 * @rc: UWB Host controller
30 * @returns: >= 0 number of bytes still available in the beacon
31 * < 0 errno code on error.
32 *
33 * See WUSB[8.6.2.7]: The host must set all the DRP IEs that it wants the
34 * device to include in its beacon at the same time. We thus have to
35 * traverse all reservations and include the DRP IEs of all PENDING
36 * and NEGOTIATED reservations in a SET DRP command for transmission.
37 *
38 * A DRP Availability IE is appended.
39 *
40 * rc->uwb_dev.mutex is held
41 *
42 * FIXME We currently ignore the returned value indicating the remaining space
43 * in beacon. This could be used to deny reservation requests earlier if
44 * determined that they would cause the beacon space to be exceeded.
45 */
static
int uwb_rc_gen_send_drp_ie(struct uwb_rc *rc)
{
	int result;
	struct device *dev = &rc->uwb_dev.dev;
	struct uwb_rc_cmd_set_drp_ie *cmd;
	struct uwb_rc_evt_set_drp_ie reply;
	struct uwb_rsv *rsv;
	int num_bytes = 0;
	u8 *IEDataptr;

	result = -ENOMEM;
	/* First traverse all reservations to determine memory needed. */
	list_for_each_entry(rsv, &rc->reservations, rc_node) {
		if (rsv->drp_ie != NULL)
			/* +2 octets for the IE header (id + length). */
			num_bytes += rsv->drp_ie->hdr.length + 2;
	}
	num_bytes += sizeof(rc->drp_avail.ie);
	cmd = kzalloc(sizeof(*cmd) + num_bytes, GFP_KERNEL);
	if (cmd == NULL)
		goto error;
	cmd->rccb.bCommandType = UWB_RC_CET_GENERAL;
	cmd->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_SET_DRP_IE);
	/*
	 * NOTE(review): wCommand gets cpu_to_le16() but wIELength is
	 * assigned raw — confirm whether wIELength also needs byte-order
	 * conversion on big-endian hosts.
	 */
	cmd->wIELength = num_bytes;
	IEDataptr = (u8 *)&cmd->IEData[0];

	/* Next traverse all reservations to place IEs in allocated memory. */
	list_for_each_entry(rsv, &rc->reservations, rc_node) {
		if (rsv->drp_ie != NULL) {
			memcpy(IEDataptr, rsv->drp_ie,
			       rsv->drp_ie->hdr.length + 2);
			IEDataptr += rsv->drp_ie->hdr.length + 2;
		}
	}
	/* The DRP Availability IE is appended last (see header comment). */
	memcpy(IEDataptr, &rc->drp_avail.ie, sizeof(rc->drp_avail.ie));

	reply.rceb.bEventType = UWB_RC_CET_GENERAL;
	reply.rceb.wEvent = UWB_RC_CMD_SET_DRP_IE;
	result = uwb_rc_cmd(rc, "SET-DRP-IE", &cmd->rccb,
			    sizeof(*cmd) + num_bytes, &reply.rceb,
			    sizeof(reply));
	if (result < 0)
		goto error_cmd;
	/* On success, result becomes the remaining beacon space. */
	result = le16_to_cpu(reply.wRemainingSpace);
	if (reply.bResultCode != UWB_RC_RES_SUCCESS) {
		dev_err(&rc->uwb_dev.dev, "SET-DRP-IE: command execution "
			"failed: %s (%d). RemainingSpace in beacon "
			"= %d\n", uwb_rc_strerror(reply.bResultCode),
			reply.bResultCode, result);
		result = -EIO;
	} else {
		dev_dbg(dev, "SET-DRP-IE sent. RemainingSpace in beacon "
			"= %d.\n", result);
		result = 0;
	}
error_cmd:
	kfree(cmd);
error:
	return result;

}
107/**
108 * Send all DRP IEs associated with this host
109 *
110 * @returns: >= 0 number of bytes still available in the beacon
111 * < 0 errno code on error.
112 *
113 * As per the protocol we obtain the host controller device lock to access
114 * bandwidth structures.
115 */
116int uwb_rc_send_all_drp_ie(struct uwb_rc *rc)
117{
118 int result;
119
120 mutex_lock(&rc->uwb_dev.mutex);
121 result = uwb_rc_gen_send_drp_ie(rc);
122 mutex_unlock(&rc->uwb_dev.mutex);
123 return result;
124}
125
126void uwb_drp_handle_timeout(struct uwb_rsv *rsv)
127{
128 struct device *dev = &rsv->rc->uwb_dev.dev;
129
130 dev_dbg(dev, "reservation timeout in state %s (%d)\n",
131 uwb_rsv_state_str(rsv->state), rsv->state);
132
133 switch (rsv->state) {
134 case UWB_RSV_STATE_O_INITIATED:
135 if (rsv->is_multicast) {
136 uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
137 return;
138 }
139 break;
140 case UWB_RSV_STATE_O_ESTABLISHED:
141 if (rsv->is_multicast)
142 return;
143 break;
144 default:
145 break;
146 }
147 uwb_rsv_remove(rsv);
148}
149
150/*
151 * Based on the DRP IE, transition a target reservation to a new
152 * state.
153 */
static void uwb_drp_process_target(struct uwb_rc *rc, struct uwb_rsv *rsv,
				   struct uwb_ie_drp *drp_ie)
{
	struct device *dev = &rc->uwb_dev.dev;
	int status;
	enum uwb_drp_reason reason_code;

	status = uwb_ie_drp_status(drp_ie);
	reason_code = uwb_ie_drp_reason_code(drp_ie);

	if (status) {
		/* Owner reports the reservation as in place. */
		switch (reason_code) {
		case UWB_DRP_REASON_ACCEPTED:
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED);
			break;
		case UWB_DRP_REASON_MODIFIED:
			dev_err(dev, "FIXME: unhandled reason code (%d/%d)\n",
				reason_code, status);
			break;
		default:
			dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n",
				 reason_code, status);
		}
	} else {
		/* status == 0: reservation not (or no longer) in place. */
		switch (reason_code) {
		case UWB_DRP_REASON_ACCEPTED:
			/* New reservations are handled in uwb_rsv_find(). */
			break;
		case UWB_DRP_REASON_DENIED:
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);
			break;
		case UWB_DRP_REASON_CONFLICT:
		case UWB_DRP_REASON_MODIFIED:
			dev_err(dev, "FIXME: unhandled reason code (%d/%d)\n",
				reason_code, status);
			break;
		default:
			dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n",
				 reason_code, status);
		}
	}
}
196
197/*
198 * Based on the DRP IE, transition an owner reservation to a new
199 * state.
200 */
static void uwb_drp_process_owner(struct uwb_rc *rc, struct uwb_rsv *rsv,
				  struct uwb_ie_drp *drp_ie)
{
	struct device *dev = &rc->uwb_dev.dev;
	int status;
	enum uwb_drp_reason reason_code;

	status = uwb_ie_drp_status(drp_ie);
	reason_code = uwb_ie_drp_reason_code(drp_ie);

	if (status) {
		/* Target confirms the reservation. */
		switch (reason_code) {
		case UWB_DRP_REASON_ACCEPTED:
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
			break;
		case UWB_DRP_REASON_MODIFIED:
			dev_err(dev, "FIXME: unhandled reason code (%d/%d)\n",
				reason_code, status);
			break;
		default:
			dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n",
				 reason_code, status);
		}
	} else {
		/* status == 0: target has not (yet) accepted. */
		switch (reason_code) {
		case UWB_DRP_REASON_PENDING:
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_PENDING);
			break;
		case UWB_DRP_REASON_DENIED:
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);
			break;
		case UWB_DRP_REASON_CONFLICT:
		case UWB_DRP_REASON_MODIFIED:
			dev_err(dev, "FIXME: unhandled reason code (%d/%d)\n",
				reason_code, status);
			break;
		default:
			dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n",
				 reason_code, status);
		}
	}
}
243
244/*
245 * Process a received DRP IE, it's either for a reservation owned by
246 * the RC or targeted at it (or it's for a WUSB cluster reservation).
247 */
static void uwb_drp_process(struct uwb_rc *rc, struct uwb_dev *src,
			    struct uwb_ie_drp *drp_ie)
{
	struct uwb_rsv *rsv;

	rsv = uwb_rsv_find(rc, src, drp_ie);
	if (!rsv) {
		/*
		 * No reservation? It's either for a recently
		 * terminated reservation; or the DRP IE couldn't be
		 * processed (e.g., an invalid IE or out of memory).
		 */
		return;
	}

	/*
	 * Do nothing with DRP IEs for reservations that have been
	 * terminated.
	 *
	 * NOTE(review): re-setting NONE on an already-NONE reservation
	 * looks redundant — presumably uwb_rsv_set_state() has cleanup
	 * side effects that must run again; confirm.
	 */
	if (rsv->state == UWB_RSV_STATE_NONE) {
		uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);
		return;
	}

	/* An IE marked "owner" was sent by the owner, so we process it
	 * from the target's perspective, and vice versa. */
	if (uwb_ie_drp_owner(drp_ie))
		uwb_drp_process_target(rc, rsv, drp_ie);
	else
		uwb_drp_process_owner(rc, rsv, drp_ie);
}
277
278
279/*
280 * Process all the DRP IEs (both DRP IEs and the DRP Availability IE)
281 * from a device.
282 */
static
void uwb_drp_process_all(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt,
			 size_t ielen, struct uwb_dev *src_dev)
{
	struct device *dev = &rc->uwb_dev.dev;
	struct uwb_ie_hdr *ie_hdr;
	void *ptr;

	/* Walk the IE stream; uwb_ie_next() takes &ptr and &ielen, so it
	 * presumably advances/decrements them per IE — confirm. */
	ptr = drp_evt->ie_data;
	for (;;) {
		ie_hdr = uwb_ie_next(&ptr, &ielen);
		if (!ie_hdr)
			break;

		switch (ie_hdr->element_id) {
		case UWB_IE_DRP_AVAILABILITY:
			/* FIXME: does something need to be done with this? */
			break;
		case UWB_IE_DRP:
			uwb_drp_process(rc, src_dev, (struct uwb_ie_drp *)ie_hdr);
			break;
		default:
			dev_warn(dev, "unexpected IE in DRP notification\n");
			break;
		}
	}

	/* Leftover octets indicate a truncated or garbled IE stream. */
	if (ielen > 0)
		dev_warn(dev, "%d octets remaining in DRP notification\n",
			 (int)ielen);
}
314
315
316/*
317 * Go through all the DRP IEs and find the ones that conflict with our
318 * reservations.
319 *
 * FIXME: must resolve the conflict according to the rules in
321 * [ECMA-368].
322 */
static
void uwb_drp_process_conflict_all(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt,
				  size_t ielen, struct uwb_dev *src_dev)
{
	struct device *dev = &rc->uwb_dev.dev;
	struct uwb_ie_hdr *ie_hdr;
	struct uwb_ie_drp *drp_ie;
	void *ptr;

	ptr = drp_evt->ie_data;
	for (;;) {
		ie_hdr = uwb_ie_next(&ptr, &ielen);
		if (!ie_hdr)
			break;

		/* drp_ie is computed but not yet used: the actual
		 * conflict check below is still unimplemented. */
		drp_ie = container_of(ie_hdr, struct uwb_ie_drp, hdr);

		/* FIXME: check if this DRP IE conflicts. */
	}

	/* Leftover octets indicate a truncated or garbled IE stream. */
	if (ielen > 0)
		dev_warn(dev, "%d octets remaining in DRP notification\n",
			 (int)ielen);
}
347
348
349/*
350 * Terminate all reservations owned by, or targeted at, 'uwb_dev'.
351 */
static void uwb_drp_terminate_all(struct uwb_rc *rc, struct uwb_dev *uwb_dev)
{
	struct uwb_rsv *rsv;

	/*
	 * NOTE(review): plain list_for_each_entry() is only safe here if
	 * uwb_rsv_remove() defers unlinking rsv from rc->reservations;
	 * if it unlinks immediately this must become
	 * list_for_each_entry_safe() — confirm.
	 */
	list_for_each_entry(rsv, &rc->reservations, rc_node) {
		if (rsv->owner == uwb_dev
		    || (rsv->target.type == UWB_RSV_TARGET_DEV && rsv->target.dev == uwb_dev))
			uwb_rsv_remove(rsv);
	}
}
362
363
364/**
365 * uwbd_evt_handle_rc_drp - handle a DRP_IE event
366 * @evt: the DRP_IE event from the radio controller
367 *
368 * This processes DRP notifications from the radio controller, either
369 * initiating a new reservation or transitioning an existing
370 * reservation into a different state.
371 *
372 * DRP notifications can occur for three different reasons:
373 *
374 * - UWB_DRP_NOTIF_DRP_IE_RECVD: one or more DRP IEs with the RC as
 *    the target or source have been received.
376 *
377 * These DRP IEs could be new or for an existing reservation.
378 *
 *    If the DRP IE for an existing reservation ceases to be
 *    received for at least mMaxLostBeacons, the reservation should be
381 * considered to be terminated. Note that the TERMINATE reason (see
382 * below) may not always be signalled (e.g., the remote device has
383 * two or more reservations established with the RC).
384 *
385 * - UWB_DRP_NOTIF_CONFLICT: DRP IEs from any device in the beacon
386 * group conflict with the RC's reservations.
387 *
388 * - UWB_DRP_NOTIF_TERMINATE: DRP IEs are no longer being received
389 * from a device (i.e., it's terminated all reservations).
390 *
391 * Only the software state of the reservations is changed; the setting
392 * of the radio controller's DRP IEs is done after all the events in
393 * an event buffer are processed. This saves waiting multiple times
394 * for the SET_DRP_IE command to complete.
395 */
396int uwbd_evt_handle_rc_drp(struct uwb_event *evt)
397{
398 struct device *dev = &evt->rc->uwb_dev.dev;
399 struct uwb_rc *rc = evt->rc;
400 struct uwb_rc_evt_drp *drp_evt;
401 size_t ielength, bytes_left;
402 struct uwb_dev_addr src_addr;
403 struct uwb_dev *src_dev;
404 int reason;
405
406 /* Is there enough data to decode the event (and any IEs in
407 its payload)? */
408 if (evt->notif.size < sizeof(*drp_evt)) {
409 dev_err(dev, "DRP event: Not enough data to decode event "
410 "[%zu bytes left, %zu needed]\n",
411 evt->notif.size, sizeof(*drp_evt));
412 return 0;
413 }
414 bytes_left = evt->notif.size - sizeof(*drp_evt);
415 drp_evt = container_of(evt->notif.rceb, struct uwb_rc_evt_drp, rceb);
416 ielength = le16_to_cpu(drp_evt->ie_length);
417 if (bytes_left != ielength) {
418 dev_err(dev, "DRP event: Not enough data in payload [%zu"
419 "bytes left, %zu declared in the event]\n",
420 bytes_left, ielength);
421 return 0;
422 }
423
424 memcpy(src_addr.data, &drp_evt->src_addr, sizeof(src_addr));
425 src_dev = uwb_dev_get_by_devaddr(rc, &src_addr);
426 if (!src_dev) {
427 /*
428 * A DRP notification from an unrecognized device.
429 *
430 * This is probably from a WUSB device that doesn't
431 * have an EUI-48 and therefore doesn't show up in the
432 * UWB device database. It's safe to simply ignore
433 * these.
434 */
435 return 0;
436 }
437
438 mutex_lock(&rc->rsvs_mutex);
439
440 reason = uwb_rc_evt_drp_reason(drp_evt);
441
442 switch (reason) {
443 case UWB_DRP_NOTIF_DRP_IE_RCVD:
444 uwb_drp_process_all(rc, drp_evt, ielength, src_dev);
445 break;
446 case UWB_DRP_NOTIF_CONFLICT:
447 uwb_drp_process_conflict_all(rc, drp_evt, ielength, src_dev);
448 break;
449 case UWB_DRP_NOTIF_TERMINATE:
450 uwb_drp_terminate_all(rc, src_dev);
451 break;
452 default:
453 dev_warn(dev, "ignored DRP event with reason code: %d\n", reason);
454 break;
455 }
456
457 mutex_unlock(&rc->rsvs_mutex);
458
459 uwb_dev_put(src_dev);
460 return 0;
461}
diff --git a/drivers/uwb/est.c b/drivers/uwb/est.c
new file mode 100644
index 000000000000..5fe566b7c845
--- /dev/null
+++ b/drivers/uwb/est.c
@@ -0,0 +1,477 @@
1/*
2 * Ultra Wide Band Radio Control
3 * Event Size Tables management
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * FIXME: docs
24 *
25 * Infrastructure, code and data tables for guessing the size of
26 * events received on the notification endpoints of UWB radio
27 * controllers.
28 *
29 * You define a table of events and for each, its size and how to get
30 * the extra size.
31 *
32 * ENTRY POINTS:
33 *
34 * uwb_est_{init/destroy}(): To initialize/release the EST subsystem.
35 *
36 * uwb_est_[u]register(): To un/register event size tables
37 * uwb_est_grow()
38 *
39 * uwb_est_find_size(): Get the size of an event
40 * uwb_est_get_size()
41 */
42#include <linux/spinlock.h>
43#define D_LOCAL 0
44#include <linux/uwb/debug.h>
45#include "uwb-internal.h"
46
47
/*
 * One registered Event Size Table: maps the low byte of wEvent to the
 * size information for events of a given (type, event-high) class.
 */
struct uwb_est {
	u16 type_event_high;	/* (bEventType << 8) | high byte of wEvent */
	u16 vendor, product;	/* device match keys; 0x0000/0xffff are wildcards */
	u8 entries;		/* number of elements in @entry[] */
	const struct uwb_est_entry *entry;	/* indexed by low byte of wEvent */
};
54
55
static struct uwb_est *uwb_est;		/* sorted, growable array of tables */
static u8 uwb_est_size;			/* allocated capacity, in entries */
static u8 uwb_est_used;			/* entries currently occupied */
static DEFINE_RWLOCK(uwb_est_lock);	/* protects the three variables above */
60
61/**
62 * WUSB Standard Event Size Table, HWA-RC interface
63 *
64 * Sizes for events and notifications type 0 (general), high nibble 0.
65 */
static
struct uwb_est_entry uwb_est_00_00xx[] = {
	/*
	 * Indexed by the low byte of wEvent.  A non-zero .offset is
	 * 1 + the byte position of a length field inside the event;
	 * uwb_est_get_size() undoes the +1 (so that 0 can mean "no
	 * variable-size part") and adds the value found there to .size.
	 */
	[UWB_RC_EVT_IE_RCV] = {
		.size = sizeof(struct uwb_rc_evt_ie_rcv),
		.offset = 1 + offsetof(struct uwb_rc_evt_ie_rcv, wIELength),
	},
	[UWB_RC_EVT_BEACON] = {
		.size = sizeof(struct uwb_rc_evt_beacon),
		.offset = 1 + offsetof(struct uwb_rc_evt_beacon, wBeaconInfoLength),
	},
	[UWB_RC_EVT_BEACON_SIZE] = {
		.size = sizeof(struct uwb_rc_evt_beacon_size),
	},
	[UWB_RC_EVT_BPOIE_CHANGE] = {
		.size = sizeof(struct uwb_rc_evt_bpoie_change),
		.offset = 1 + offsetof(struct uwb_rc_evt_bpoie_change,
				       wBPOIELength),
	},
	[UWB_RC_EVT_BP_SLOT_CHANGE] = {
		.size = sizeof(struct uwb_rc_evt_bp_slot_change),
	},
	[UWB_RC_EVT_BP_SWITCH_IE_RCV] = {
		.size = sizeof(struct uwb_rc_evt_bp_switch_ie_rcv),
		.offset = 1 + offsetof(struct uwb_rc_evt_bp_switch_ie_rcv, wIELength),
	},
	[UWB_RC_EVT_DEV_ADDR_CONFLICT] = {
		.size = sizeof(struct uwb_rc_evt_dev_addr_conflict),
	},
	[UWB_RC_EVT_DRP_AVAIL] = {
		.size = sizeof(struct uwb_rc_evt_drp_avail)
	},
	[UWB_RC_EVT_DRP] = {
		.size = sizeof(struct uwb_rc_evt_drp),
		.offset = 1 + offsetof(struct uwb_rc_evt_drp, ie_length),
	},
	[UWB_RC_EVT_BP_SWITCH_STATUS] = {
		.size = sizeof(struct uwb_rc_evt_bp_switch_status),
	},
	[UWB_RC_EVT_CMD_FRAME_RCV] = {
		.size = sizeof(struct uwb_rc_evt_cmd_frame_rcv),
		.offset = 1 + offsetof(struct uwb_rc_evt_cmd_frame_rcv, dataLength),
	},
	[UWB_RC_EVT_CHANNEL_CHANGE_IE_RCV] = {
		.size = sizeof(struct uwb_rc_evt_channel_change_ie_rcv),
		.offset = 1 + offsetof(struct uwb_rc_evt_channel_change_ie_rcv, wIELength),
	},
	/* Command-confirmation events: all fixed size. */
	[UWB_RC_CMD_CHANNEL_CHANGE] = {
		.size = sizeof(struct uwb_rc_evt_confirm),
	},
	[UWB_RC_CMD_DEV_ADDR_MGMT] = {
		.size = sizeof(struct uwb_rc_evt_dev_addr_mgmt) },
	[UWB_RC_CMD_GET_IE] = {
		.size = sizeof(struct uwb_rc_evt_get_ie),
		.offset = 1 + offsetof(struct uwb_rc_evt_get_ie, wIELength),
	},
	[UWB_RC_CMD_RESET] = {
		.size = sizeof(struct uwb_rc_evt_confirm),
	},
	[UWB_RC_CMD_SCAN] = {
		.size = sizeof(struct uwb_rc_evt_confirm),
	},
	[UWB_RC_CMD_SET_BEACON_FILTER] = {
		.size = sizeof(struct uwb_rc_evt_confirm),
	},
	[UWB_RC_CMD_SET_DRP_IE] = {
		.size = sizeof(struct uwb_rc_evt_set_drp_ie),
	},
	[UWB_RC_CMD_SET_IE] = {
		.size = sizeof(struct uwb_rc_evt_set_ie),
	},
	[UWB_RC_CMD_SET_NOTIFICATION_FILTER] = {
		.size = sizeof(struct uwb_rc_evt_confirm),
	},
	[UWB_RC_CMD_SET_TX_POWER] = {
		.size = sizeof(struct uwb_rc_evt_confirm),
	},
	[UWB_RC_CMD_SLEEP] = {
		.size = sizeof(struct uwb_rc_evt_confirm),
	},
	[UWB_RC_CMD_START_BEACON] = {
		.size = sizeof(struct uwb_rc_evt_confirm),
	},
	[UWB_RC_CMD_STOP_BEACON] = {
		.size = sizeof(struct uwb_rc_evt_confirm),
	},
	[UWB_RC_CMD_BP_MERGE] = {
		.size = sizeof(struct uwb_rc_evt_confirm),
	},
	[UWB_RC_CMD_SEND_COMMAND_FRAME] = {
		.size = sizeof(struct uwb_rc_evt_confirm),
	},
	[UWB_RC_CMD_SET_ASIE_NOTIF] = {
		.size = sizeof(struct uwb_rc_evt_confirm),
	},
};
161
static
struct uwb_est_entry uwb_est_01_00xx[] = {
	/* Extended type-1 events: all fixed size (no .offset needed). */
	[UWB_RC_DAA_ENERGY_DETECTED] = {
		.size = sizeof(struct uwb_rc_evt_daa_energy_detected),
	},
	[UWB_RC_SET_DAA_ENERGY_MASK] = {
		.size = sizeof(struct uwb_rc_evt_set_daa_energy_mask),
	},
	[UWB_RC_SET_NOTIFICATION_FILTER_EX] = {
		.size = sizeof(struct uwb_rc_evt_set_notification_filter_ex),
	},
};
174
175/**
176 * Initialize the EST subsystem
177 *
178 * Register the standard tables also.
179 *
180 * FIXME: tag init
181 */
182int uwb_est_create(void)
183{
184 int result;
185
186 uwb_est_size = 2;
187 uwb_est_used = 0;
188 uwb_est = kzalloc(uwb_est_size * sizeof(uwb_est[0]), GFP_KERNEL);
189 if (uwb_est == NULL)
190 return -ENOMEM;
191
192 result = uwb_est_register(UWB_RC_CET_GENERAL, 0, 0xffff, 0xffff,
193 uwb_est_00_00xx, ARRAY_SIZE(uwb_est_00_00xx));
194 if (result < 0)
195 goto out;
196 result = uwb_est_register(UWB_RC_CET_EX_TYPE_1, 0, 0xffff, 0xffff,
197 uwb_est_01_00xx, ARRAY_SIZE(uwb_est_01_00xx));
198out:
199 return result;
200}
201
202
203/** Clean it up */
204void uwb_est_destroy(void)
205{
206 kfree(uwb_est);
207 uwb_est = NULL;
208 uwb_est_size = uwb_est_used = 0;
209}
210
211
212/**
213 * Double the capacity of the EST table
214 *
215 * @returns 0 if ok, < 0 errno no error.
216 */
217static
218int uwb_est_grow(void)
219{
220 size_t actual_size = uwb_est_size * sizeof(uwb_est[0]);
221 void *new = kmalloc(2 * actual_size, GFP_ATOMIC);
222 if (new == NULL)
223 return -ENOMEM;
224 memcpy(new, uwb_est, actual_size);
225 memset(new + actual_size, 0, actual_size);
226 kfree(uwb_est);
227 uwb_est = new;
228 uwb_est_size *= 2;
229 return 0;
230}
231
232
233/**
234 * Register an event size table
235 *
236 * Makes room for it if the table is full, and then inserts it in the
237 * right position (entries are sorted by type, event_high, vendor and
238 * then product).
239 *
240 * @vendor: vendor code for matching against the device (0x0000 and
241 * 0xffff mean any); use 0x0000 to force all to match without
 *           checking possible vendor specific ones, 0xffff to match
243 * after checking vendor specific ones.
244 *
245 * @product: product code from that vendor; same matching rules, use
246 * 0x0000 for not allowing vendor specific matches, 0xffff
247 * for allowing.
248 *
 * This arrangement just makes the tables sort differently. Because the
 * table is sorted by growing type-event_high-vendor-product, a zero
 * vendor will match before a 0x456a vendor, which will match before a
 * 0xffff vendor.
253 *
 * @returns 0 if ok, < 0 errno on error (-ENOMEM if the table cannot be grown).
255 */
256/* FIXME: add bus type to vendor/product code */
257int uwb_est_register(u8 type, u8 event_high, u16 vendor, u16 product,
258 const struct uwb_est_entry *entry, size_t entries)
259{
260 unsigned long flags;
261 unsigned itr;
262 u16 type_event_high;
263 int result = 0;
264
265 write_lock_irqsave(&uwb_est_lock, flags);
266 if (uwb_est_used == uwb_est_size) {
267 result = uwb_est_grow();
268 if (result < 0)
269 goto out;
270 }
271 /* Find the right spot to insert it in */
272 type_event_high = type << 8 | event_high;
273 for (itr = 0; itr < uwb_est_used; itr++)
274 if (uwb_est[itr].type_event_high < type
275 && uwb_est[itr].vendor < vendor
276 && uwb_est[itr].product < product)
277 break;
278
279 /* Shift others to make room for the new one? */
280 if (itr < uwb_est_used)
281 memmove(&uwb_est[itr+1], &uwb_est[itr], uwb_est_used - itr);
282 uwb_est[itr].type_event_high = type << 8 | event_high;
283 uwb_est[itr].vendor = vendor;
284 uwb_est[itr].product = product;
285 uwb_est[itr].entry = entry;
286 uwb_est[itr].entries = entries;
287 uwb_est_used++;
288out:
289 write_unlock_irqrestore(&uwb_est_lock, flags);
290 return result;
291}
292EXPORT_SYMBOL_GPL(uwb_est_register);
293
294
295/**
296 * Unregister an event size table
297 *
298 * This just removes the specified entry and moves the ones after it
299 * to fill in the gap. This is needed to keep the list sorted; no
300 * reallocation is done to reduce the size of the table.
301 *
302 * We unregister by all the data we used to register instead of by
303 * pointer to the @entry array because we might have used the same
304 * table for a bunch of IDs (for example).
305 *
306 * @returns 0 if ok, < 0 errno on error (-ENOENT if not found).
307 */
308int uwb_est_unregister(u8 type, u8 event_high, u16 vendor, u16 product,
309 const struct uwb_est_entry *entry, size_t entries)
310{
311 unsigned long flags;
312 unsigned itr;
313 struct uwb_est est_cmp = {
314 .type_event_high = type << 8 | event_high,
315 .vendor = vendor,
316 .product = product,
317 .entry = entry,
318 .entries = entries
319 };
320 write_lock_irqsave(&uwb_est_lock, flags);
321 for (itr = 0; itr < uwb_est_used; itr++)
322 if (!memcmp(&uwb_est[itr], &est_cmp, sizeof(est_cmp)))
323 goto found;
324 write_unlock_irqrestore(&uwb_est_lock, flags);
325 return -ENOENT;
326
327found:
328 if (itr < uwb_est_used - 1) /* Not last one? move ones above */
329 memmove(&uwb_est[itr], &uwb_est[itr+1], uwb_est_used - itr - 1);
330 uwb_est_used--;
331 write_unlock_irqrestore(&uwb_est_lock, flags);
332 return 0;
333}
334EXPORT_SYMBOL_GPL(uwb_est_unregister);
335
336
337/**
338 * Get the size of an event from a table
339 *
340 * @rceb: pointer to the buffer with the event
341 * @rceb_size: size of the area pointed to by @rceb in bytes.
342 * @returns: > 0 Size of the event
343 * -ENOSPC An area big enough was not provided to look
344 * ahead into the event's guts and guess the size.
 *           -ENOENT Unknown event code (wEvent).
346 *
347 * This will look at the received RCEB and guess what is the total
348 * size. For variable sized events, it will look further ahead into
349 * their length field to see how much data should be read.
350 *
351 * Note this size is *not* final--the neh (Notification/Event Handle)
 * might specify an extra size to add.
353 */
354static
355ssize_t uwb_est_get_size(struct uwb_rc *uwb_rc, struct uwb_est *est,
356 u8 event_low, const struct uwb_rceb *rceb,
357 size_t rceb_size)
358{
359 unsigned offset;
360 ssize_t size;
361 struct device *dev = &uwb_rc->uwb_dev.dev;
362 const struct uwb_est_entry *entry;
363
364 size = -ENOENT;
365 if (event_low >= est->entries) { /* in range? */
366 dev_err(dev, "EST %p 0x%04x/%04x/%04x[%u]: event %u out of range\n",
367 est, est->type_event_high, est->vendor, est->product,
368 est->entries, event_low);
369 goto out;
370 }
371 size = -ENOENT;
372 entry = &est->entry[event_low];
373 if (entry->size == 0 && entry->offset == 0) { /* unknown? */
374 dev_err(dev, "EST %p 0x%04x/%04x/%04x[%u]: event %u unknown\n",
375 est, est->type_event_high, est->vendor, est->product,
376 est->entries, event_low);
377 goto out;
378 }
379 offset = entry->offset; /* extra fries with that? */
380 if (offset == 0)
381 size = entry->size;
382 else {
383 /* Ops, got an extra size field at 'offset'--read it */
384 const void *ptr = rceb;
385 size_t type_size = 0;
386 offset--;
387 size = -ENOSPC; /* enough data for more? */
388 switch (entry->type) {
389 case UWB_EST_16: type_size = sizeof(__le16); break;
390 case UWB_EST_8: type_size = sizeof(u8); break;
391 default: BUG();
392 }
393 if (offset + type_size > rceb_size) {
394 dev_err(dev, "EST %p 0x%04x/%04x/%04x[%u]: "
395 "not enough data to read extra size\n",
396 est, est->type_event_high, est->vendor,
397 est->product, est->entries);
398 goto out;
399 }
400 size = entry->size;
401 ptr += offset;
402 switch (entry->type) {
403 case UWB_EST_16: size += le16_to_cpu(*(__le16 *)ptr); break;
404 case UWB_EST_8: size += *(u8 *)ptr; break;
405 default: BUG();
406 }
407 }
408out:
409 return size;
410}
411
412
413/**
414 * Guesses the size of a WA event
415 *
416 * @rceb: pointer to the buffer with the event
417 * @rceb_size: size of the area pointed to by @rceb in bytes.
418 * @returns: > 0 Size of the event
419 * -ENOSPC An area big enough was not provided to look
420 * ahead into the event's guts and guess the size.
 *           -ENOENT Unknown event code (wEvent).
422 *
423 * This will look at the received RCEB and guess what is the total
424 * size by checking all the tables registered with
425 * uwb_est_register(). For variable sized events, it will look further
426 * ahead into their length field to see how much data should be read.
427 *
428 * Note this size is *not* final--the neh (Notification/Event Handle)
 * might specify an extra size to add or replace.
430 */
ssize_t uwb_est_find_size(struct uwb_rc *rc, const struct uwb_rceb *rceb,
			  size_t rceb_size)
{
	/* FIXME: add vendor/product data */
	ssize_t size;
	struct device *dev = &rc->uwb_dev.dev;
	unsigned long flags;
	unsigned itr;
	u16 type_event_high, event;
	u8 *ptr = (u8 *) rceb;

	read_lock_irqsave(&uwb_est_lock, flags);
	d_printf(2, dev, "Size query for event 0x%02x/%04x/%02x,"
		 " buffer size %ld\n",
		 (unsigned) rceb->bEventType,
		 (unsigned) le16_to_cpu(rceb->wEvent),
		 (unsigned) rceb->bEventContext,
		 (long) rceb_size);
	size = -ENOSPC;
	/* need at least the full RCEB header to classify the event */
	if (rceb_size < sizeof(*rceb))
		goto out;
	event = le16_to_cpu(rceb->wEvent);
	/* table key is (bEventType << 8) | high byte of wEvent; the low
	 * byte of wEvent indexes inside the matching table */
	type_event_high = rceb->bEventType << 8 | (event & 0xff00) >> 8;
	for (itr = 0; itr < uwb_est_used; itr++) {
		d_printf(3, dev, "Checking EST 0x%04x/%04x/%04x\n",
			 uwb_est[itr].type_event_high, uwb_est[itr].vendor,
			 uwb_est[itr].product);
		if (uwb_est[itr].type_event_high != type_event_high)
			continue;
		size = uwb_est_get_size(rc, &uwb_est[itr],
					event & 0x00ff, rceb, rceb_size);
		/* try more tables that might handle the same type */
		if (size != -ENOENT)
			goto out;
	}
	/* no table knew the event; dump the first header bytes to help
	 * debugging (safe: rceb_size >= sizeof(*rceb) was checked) */
	dev_dbg(dev, "event 0x%02x/%04x/%02x: no handlers available; "
		"RCEB %02x %02x %02x %02x\n",
		(unsigned) rceb->bEventType,
		(unsigned) le16_to_cpu(rceb->wEvent),
		(unsigned) rceb->bEventContext,
		ptr[0], ptr[1], ptr[2], ptr[3]);
	size = -ENOENT;
out:
	read_unlock_irqrestore(&uwb_est_lock, flags);
	return size;
}
EXPORT_SYMBOL_GPL(uwb_est_find_size);
diff --git a/drivers/uwb/hwa-rc.c b/drivers/uwb/hwa-rc.c
new file mode 100644
index 000000000000..3d26fa0f8ae1
--- /dev/null
+++ b/drivers/uwb/hwa-rc.c
@@ -0,0 +1,926 @@
1/*
2 * WUSB Host Wire Adapter: Radio Control Interface (WUSB[8.6])
3 * Radio Control command/event transport
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * Initialize the Radio Control interface Driver.
24 *
25 * For each device probed, creates an 'struct hwarc' which contains
26 * just the representation of the UWB Radio Controller, and the logic
27 * for reading notifications and passing them to the UWB Core.
28 *
29 * So we initialize all of those, register the UWB Radio Controller
30 * and setup the notification/event handle to pipe the notifications
31 * to the UWB management Daemon.
32 *
33 * Command and event filtering.
34 *
35 * This is the driver for the Radio Control Interface described in WUSB
36 * 1.0. The core UWB module assumes that all drivers are compliant to the
37 * WHCI 0.95 specification. We thus create a filter that parses all
38 * incoming messages from the (WUSB 1.0) device and manipulate them to
39 * conform to the WHCI 0.95 specification. Similarly, outgoing messages
40 * are parsed and manipulated to conform to the WUSB 1.0 compliant messages
41 * that the device expects. Only a few messages are affected:
42 * Affected events:
43 * UWB_RC_EVT_BEACON
44 * UWB_RC_EVT_BP_SLOT_CHANGE
45 * UWB_RC_EVT_DRP_AVAIL
46 * UWB_RC_EVT_DRP
47 * Affected commands:
48 * UWB_RC_CMD_SCAN
49 * UWB_RC_CMD_SET_DRP_IE
50 *
51 *
52 *
53 */
54#include <linux/version.h>
55#include <linux/init.h>
56#include <linux/module.h>
57#include <linux/usb.h>
58#include <linux/usb/wusb.h>
59#include <linux/usb/wusb-wa.h>
60#include <linux/uwb.h>
61#include "uwb-internal.h"
62#define D_LOCAL 1
63#include <linux/uwb/debug.h>
64
65/* The device uses commands and events from the WHCI specification, although
66 * reporting itself as WUSB compliant. */
67#define WUSB_QUIRK_WHCI_CMD_EVT 0x01
68
69/**
70 * Descriptor for an instance of the UWB Radio Control Driver that
71 * attaches to the RCI interface of the Host Wired Adapter.
72 *
73 * Unless there is a lock specific to the 'data members', all access
74 * is protected by uwb_rc->mutex.
75 *
76 * The NEEP (Notification/Event EndPoint) URB (@neep_urb) writes to
77 * @rd_buffer. Note there is no locking because it is perfectly (heh!)
78 * serialized--probe() submits an URB, callback is called, processes
79 * the data (synchronously), submits another URB, and so on. There is
80 * no concurrent access to the buffer.
81 */
struct hwarc {
	struct usb_device *usb_dev;
	struct usb_interface *usb_iface;
	struct uwb_rc *uwb_rc;	/* UWB host controller */
	struct urb *neep_urb;	/* Notification endpoint handling */
	struct edc neep_edc;	/* NOTE(review): presumably an error density counter for NEEP failures -- confirm */
	void *rd_buffer;	/* NEEP read buffer */
};
90
91
/* Beacon received notification (WUSB 1.0 [8.6.3.2]), packed wire format */
struct uwb_rc_evt_beacon_WUSB_0100 {
	struct uwb_rceb rceb;
	u8 bChannelNumber;
	__le16 wBPSTOffset;
	u8 bLQI;
	u8 bRSSI;
	__le16 wBeaconInfoLength;	/* bytes in BeaconInfo[] */
	u8 BeaconInfo[];		/* variable-size IE payload */
} __attribute__((packed));
102
103/**
104 * Filter WUSB 1.0 BEACON RCV notification to be WHCI 0.95
105 *
106 * @header: the incoming event
107 * @buf_size: size of buffer containing incoming event
108 * @new_size: size of event after filtering completed
109 *
110 * The WHCI 0.95 spec has a "Beacon Type" field. This value is unknown at
111 * the time we receive the beacon from WUSB so we just set it to
112 * UWB_RC_BEACON_TYPE_NEIGHBOR as a default.
113 * The solution below allocates memory upon receipt of every beacon from a
114 * WUSB device. This will deteriorate performance. What is the right way to
115 * do this?
116 */
static
int hwarc_filter_evt_beacon_WUSB_0100(struct uwb_rc *rc,
				      struct uwb_rceb **header,
				      const size_t buf_size,
				      size_t *new_size)
{
	struct uwb_rc_evt_beacon_WUSB_0100 *be;
	struct uwb_rc_evt_beacon *newbe;
	size_t bytes_left, ielength;
	struct device *dev = &rc->uwb_dev.dev;

	/* Validate that the fixed part and the declared IE payload both
	 * fit in the buffer before touching any of it. */
	be = container_of(*header, struct uwb_rc_evt_beacon_WUSB_0100, rceb);
	bytes_left = buf_size;
	if (bytes_left < sizeof(*be)) {
		dev_err(dev, "Beacon Received Notification: Not enough data "
			"to decode for filtering (%zu vs %zu bytes needed)\n",
			bytes_left, sizeof(*be));
		return -EINVAL;
	}
	bytes_left -= sizeof(*be);
	ielength = le16_to_cpu(be->wBeaconInfoLength);
	if (bytes_left < ielength) {
		dev_err(dev, "Beacon Received Notification: Not enough data "
			"to decode IEs (%zu vs %zu bytes needed)\n",
			bytes_left, ielength);
		return -EINVAL;
	}
	/* GFP_ATOMIC: presumably runs in the NEEP URB completion path
	 * (atomic context) -- TODO confirm */
	newbe = kzalloc(sizeof(*newbe) + ielength, GFP_ATOMIC);
	if (newbe == NULL)
		return -ENOMEM;
	/* Rebuild the event in the WHCI 0.95 layout; the beacon type is
	 * not carried by the WUSB event, so default to "neighbor". */
	newbe->rceb = be->rceb;
	newbe->bChannelNumber = be->bChannelNumber;
	newbe->bBeaconType = UWB_RC_BEACON_TYPE_NEIGHBOR;
	newbe->wBPSTOffset = be->wBPSTOffset;
	newbe->bLQI = be->bLQI;
	newbe->bRSSI = be->bRSSI;
	newbe->wBeaconInfoLength = be->wBeaconInfoLength;
	memcpy(newbe->BeaconInfo, be->BeaconInfo, ielength);
	*header = &newbe->rceb;
	*new_size = sizeof(*newbe) + ielength;
	return 1; /* calling function will free memory */
}
159
160
/* DRP Availability change notification (WUSB 1.0 [8.6.3.8]), packed */
struct uwb_rc_evt_drp_avail_WUSB_0100 {
	struct uwb_rceb rceb;
	__le16 wIELength;	/* bytes in IEData[] */
	u8 IEData[];		/* parsed as a single IE (header + bitmap) */
} __attribute__((packed));
167
168/**
169 * Filter WUSB 1.0 DRP AVAILABILITY CHANGE notification to be WHCI 0.95
170 *
171 * @header: the incoming event
172 * @buf_size: size of buffer containing incoming event
173 * @new_size: size of event after filtering completed
174 */
static
int hwarc_filter_evt_drp_avail_WUSB_0100(struct uwb_rc *rc,
					 struct uwb_rceb **header,
					 const size_t buf_size,
					 size_t *new_size)
{
	struct uwb_rc_evt_drp_avail_WUSB_0100 *da;
	struct uwb_rc_evt_drp_avail *newda;
	struct uwb_ie_hdr *ie_hdr;
	size_t bytes_left, ielength;
	struct device *dev = &rc->uwb_dev.dev;

	/* Validate fixed part, declared IE length, and that at least a
	 * full IE header is present, before dereferencing anything. */
	da = container_of(*header, struct uwb_rc_evt_drp_avail_WUSB_0100, rceb);
	bytes_left = buf_size;
	if (bytes_left < sizeof(*da)) {
		dev_err(dev, "Not enough data to decode DRP Avail "
			"Notification for filtering. Expected %zu, "
			"received %zu.\n", (size_t)sizeof(*da), bytes_left);
		return -EINVAL;
	}
	bytes_left -= sizeof(*da);
	ielength = le16_to_cpu(da->wIELength);
	if (bytes_left < ielength) {
		dev_err(dev, "DRP Avail Notification filter: IE length "
			"[%zu bytes] does not match actual length "
			"[%zu bytes].\n", ielength, bytes_left);
		return -EINVAL;
	}
	if (ielength < sizeof(*ie_hdr)) {
		dev_err(dev, "DRP Avail Notification filter: Not enough "
			"data to decode IE [%zu bytes, %zu needed]\n",
			ielength, sizeof(*ie_hdr));
		return -EINVAL;
	}
	ie_hdr = (void *) da->IEData;
	/* NOTE(review): the check allows length == 32 while the message
	 * says "< 32"; confirm the intended bound (sizeof(newda->bmp)?). */
	if (ie_hdr->length > 32) {
		dev_err(dev, "DRP Availability Change event has unexpected "
			"length for filtering. Expected < 32 bytes, "
			"got %zu bytes.\n", (size_t)ie_hdr->length);
		return -EINVAL;
	}
	newda = kzalloc(sizeof(*newda), GFP_ATOMIC);
	if (newda == NULL)
		return -ENOMEM;
	/* WHCI layout: just the header plus the availability bitmap. */
	newda->rceb = da->rceb;
	memcpy(newda->bmp, (u8 *) ie_hdr + sizeof(*ie_hdr), ie_hdr->length);
	*header = &newda->rceb;
	*new_size = sizeof(*newda);
	return 1; /* calling function will free memory */
}
226
227
/* DRP notification (WUSB 1.0 [8.6.3.9]), packed wire format */
struct uwb_rc_evt_drp_WUSB_0100 {
	struct uwb_rceb rceb;
	struct uwb_dev_addr wSrcAddr;	/* address of the notifying device */
	u8 bExplicit;
	__le16 wIELength;	/* bytes in IEData[] */
	u8 IEData[];
} __attribute__((packed));
236
237/**
238 * Filter WUSB 1.0 DRP Notification to be WHCI 0.95
239 *
240 * @header: the incoming event
241 * @buf_size: size of buffer containing incoming event
242 * @new_size: size of event after filtering completed
243 *
244 * It is hard to manage DRP reservations without having a Reason code.
245 * Unfortunately there is none in the WUSB spec. We just set the default to
246 * DRP IE RECEIVED.
247 * We do not currently use the bBeaconSlotNumber value, so we set this to
248 * zero for now.
249 */
static
int hwarc_filter_evt_drp_WUSB_0100(struct uwb_rc *rc,
				   struct uwb_rceb **header,
				   const size_t buf_size,
				   size_t *new_size)
{
	struct uwb_rc_evt_drp_WUSB_0100 *drpev;
	struct uwb_rc_evt_drp *newdrpev;
	size_t bytes_left, ielength;
	struct device *dev = &rc->uwb_dev.dev;

	/* Validate fixed part and declared IE payload before use. */
	drpev = container_of(*header, struct uwb_rc_evt_drp_WUSB_0100, rceb);
	bytes_left = buf_size;
	if (bytes_left < sizeof(*drpev)) {
		dev_err(dev, "Not enough data to decode DRP Notification "
			"for filtering. Expected %zu, received %zu.\n",
			(size_t)sizeof(*drpev), bytes_left);
		return -EINVAL;
	}
	ielength = le16_to_cpu(drpev->wIELength);
	bytes_left -= sizeof(*drpev);
	if (bytes_left < ielength) {
		dev_err(dev, "DRP Notification filter: header length [%zu "
			"bytes] does not match actual length [%zu "
			"bytes].\n", ielength, bytes_left);
		return -EINVAL;
	}
	newdrpev = kzalloc(sizeof(*newdrpev) + ielength, GFP_ATOMIC);
	if (newdrpev == NULL)
		return -ENOMEM;
	/* WUSB carries no reason code or beacon slot number (see the
	 * comment above): default to "DRP IE received" and slot 0. */
	newdrpev->rceb = drpev->rceb;
	newdrpev->src_addr = drpev->wSrcAddr;
	newdrpev->reason = UWB_DRP_NOTIF_DRP_IE_RCVD;
	newdrpev->beacon_slot_number = 0;
	newdrpev->ie_length = drpev->wIELength;
	memcpy(newdrpev->ie_data, drpev->IEData, ielength);
	*header = &newdrpev->rceb;
	*new_size = sizeof(*newdrpev) + ielength;
	return 1; /* calling function will free memory */
}
290
291
/* Scan Command (WUSB 1.0 [8.6.2.5]): the WHCI command minus the
 * trailing wStarttime field */
struct uwb_rc_cmd_scan_WUSB_0100 {
	struct uwb_rccb rccb;
	u8 bChannelNumber;
	u8 bScanState;
} __attribute__((packed));
298
299/**
300 * Filter WHCI 0.95 SCAN command to be WUSB 1.0 SCAN command
301 *
302 * @header: command sent to device (compliant to WHCI 0.95)
303 * @size: size of command sent to device
304 *
305 * We only reduce the size by two bytes because the WUSB 1.0 scan command
306 * does not have the last field (wStarttime). Also, make sure we don't send
307 * the device an unexpected scan type.
308 */
309static
310int hwarc_filter_cmd_scan_WUSB_0100(struct uwb_rc *rc,
311 struct uwb_rccb **header,
312 size_t *size)
313{
314 struct uwb_rc_cmd_scan *sc;
315
316 sc = container_of(*header, struct uwb_rc_cmd_scan, rccb);
317
318 if (sc->bScanState == UWB_SCAN_ONLY_STARTTIME)
319 sc->bScanState = UWB_SCAN_ONLY;
320 /* Don't send the last two bytes. */
321 *size -= 2;
322 return 0;
323}
324
325
/* SET DRP IE command (WUSB 1.0 [8.6.2.7]), packed wire format */
struct uwb_rc_cmd_set_drp_ie_WUSB_0100 {
	struct uwb_rccb rccb;
	u8 bExplicit;		/* extra field vs the WHCI command */
	__le16 wIELength;	/* bytes in IEData[] */
	struct uwb_ie_drp IEData[];
} __attribute__((packed));
333
334/**
335 * Filter WHCI 0.95 SET DRP IE command to be WUSB 1.0 SET DRP IE command
336 *
337 * @header: command sent to device (compliant to WHCI 0.95)
338 * @size: size of command sent to device
339 *
340 * WUSB has an extra bExplicit field - we assume always explicit
341 * negotiation so this field is set. The command expected by the device is
342 * thus larger than the one prepared by the driver so we need to
343 * reallocate memory to accommodate this.
344 * We trust the driver to send us the correct data so no checking is done
 * on incoming data - even though it is variable length.
346 */
static
int hwarc_filter_cmd_set_drp_ie_WUSB_0100(struct uwb_rc *rc,
					  struct uwb_rccb **header,
					  size_t *size)
{
	struct uwb_rc_cmd_set_drp_ie *orgcmd;
	struct uwb_rc_cmd_set_drp_ie_WUSB_0100 *cmd;
	size_t ielength;

	/* The WUSB command is larger (extra bExplicit field), so build a
	 * fresh buffer and copy the original command into it. */
	orgcmd = container_of(*header, struct uwb_rc_cmd_set_drp_ie, rccb);
	ielength = le16_to_cpu(orgcmd->wIELength);
	cmd = kzalloc(sizeof(*cmd) + ielength, GFP_KERNEL);
	if (cmd == NULL)
		return -ENOMEM;
	cmd->rccb = orgcmd->rccb;
	/* NOTE(review): the comment above says the explicit flag "is
	 * set", but 0 is written here -- confirm which is intended. */
	cmd->bExplicit = 0;
	cmd->wIELength = orgcmd->wIELength;
	memcpy(cmd->IEData, orgcmd->IEData, ielength);
	*header = &cmd->rccb;
	*size = sizeof(*cmd) + ielength;
	return 1; /* calling function will free memory */
}
369
370
371/**
372 * Filter data from WHCI driver to WUSB device
373 *
374 * @header: WHCI 0.95 compliant command from driver
375 * @size: length of command
376 *
377 * The routine managing commands to the device (uwb_rc_cmd()) will call the
378 * filtering function pointer (if it exists) before it passes any data to
379 * the device. At this time the command has been formatted according to
380 * WHCI 0.95 and is ready to be sent to the device.
381 *
382 * The filter function will be provided with the current command and its
383 * length. The function will manipulate the command if necessary and
384 * potentially reallocate memory for a command that needed more memory that
385 * the given command. If new memory was created the function will return 1
386 * to indicate to the calling function that the memory need to be freed
387 * when not needed any more. The size will contain the new length of the
388 * command.
389 * If memory has not been allocated we rely on the original mechanisms to
390 * free the memory of the command - even when we reduce the value of size.
391 */
392static
393int hwarc_filter_cmd_WUSB_0100(struct uwb_rc *rc, struct uwb_rccb **header,
394 size_t *size)
395{
396 int result;
397 struct uwb_rccb *rccb = *header;
398 int cmd = le16_to_cpu(rccb->wCommand);
399 switch (cmd) {
400 case UWB_RC_CMD_SCAN:
401 result = hwarc_filter_cmd_scan_WUSB_0100(rc, header, size);
402 break;
403 case UWB_RC_CMD_SET_DRP_IE:
404 result = hwarc_filter_cmd_set_drp_ie_WUSB_0100(rc, header, size);
405 break;
406 default:
407 result = -ENOANO;
408 break;
409 }
410 return result;
411}
412
413
414/**
415 * Filter data from WHCI driver to WUSB device
416 *
417 * @header: WHCI 0.95 compliant command from driver
418 * @size: length of command
419 *
420 * Filter commands based on which protocol the device supports. The WUSB
421 * errata should be the same as WHCI 0.95 so we do not filter that here -
422 * only WUSB 1.0.
423 */
424static
425int hwarc_filter_cmd(struct uwb_rc *rc, struct uwb_rccb **header,
426 size_t *size)
427{
428 int result = -ENOANO;
429 if (rc->version == 0x0100)
430 result = hwarc_filter_cmd_WUSB_0100(rc, header, size);
431 return result;
432}
433
434
435/**
436 * Compute return value as sum of incoming value and value at given offset
437 *
438 * @rceb: event for which we compute the size, it contains a variable
439 * length field.
440 * @core_size: size of the "non variable" part of the event
441 * @offset: place in event where the length of the variable part is stored
442 * @buf_size: total length of buffer in which event arrived - we need to make
443 * sure we read the offset in memory that is still part of the event
444 */
445static
446ssize_t hwarc_get_event_size(struct uwb_rc *rc, const struct uwb_rceb *rceb,
447 size_t core_size, size_t offset,
448 const size_t buf_size)
449{
450 ssize_t size = -ENOSPC;
451 const void *ptr = rceb;
452 size_t type_size = sizeof(__le16);
453 struct device *dev = &rc->uwb_dev.dev;
454
455 if (offset + type_size >= buf_size) {
456 dev_err(dev, "Not enough data to read extra size of event "
457 "0x%02x/%04x/%02x, only got %zu bytes.\n",
458 rceb->bEventType, le16_to_cpu(rceb->wEvent),
459 rceb->bEventContext, buf_size);
460 goto out;
461 }
462 ptr += offset;
463 size = core_size + le16_to_cpu(*(__le16 *)ptr);
464out:
465 return size;
466}
467
468
/* Beacon slot change notification (WUSB 1.0 [8.6.3.5]), packed */
struct uwb_rc_evt_bp_slot_change_WUSB_0100 {
	struct uwb_rceb rceb;
	u8 bSlotNumber;		/* new slot the RC beacons in */
} __attribute__((packed));
474
475
476/**
477 * Filter data from WUSB device to WHCI driver
478 *
479 * @header: incoming event
480 * @buf_size: size of buffer in which event arrived
481 * @_event_size: actual size of event in the buffer
482 * @new_size: size of event after filtered
483 *
484 * We don't know how the buffer is constructed - there may be more than one
485 * event in it so buffer length does not determine event length. We first
486 * determine the expected size of the incoming event. This value is passed
487 * back only if the actual filtering succeeded (so we know the computed
488 * expected size is correct). This value will be zero if
489 * the event did not need any filtering.
490 *
491 * WHCI interprets the BP Slot Change event's data differently than
492 * WUSB. The event sizes are exactly the same. The data field
493 * indicates the new beacon slot in which a RC is transmitting its
494 * beacon. The maximum value of this is 96 (wMacBPLength ECMA-368
495 * 17.16 (Table 117)). We thus know that the WUSB value will not set
496 * the bit bNoSlot, so we don't really do anything (placeholder).
497 */
498static
499int hwarc_filter_event_WUSB_0100(struct uwb_rc *rc, struct uwb_rceb **header,
500 const size_t buf_size, size_t *_real_size,
501 size_t *_new_size)
502{
503 int result = -ENOANO;
504 struct uwb_rceb *rceb = *header;
505 int event = le16_to_cpu(rceb->wEvent);
506 size_t event_size;
507 size_t core_size, offset;
508
509 if (rceb->bEventType != UWB_RC_CET_GENERAL)
510 goto out;
511 switch (event) {
512 case UWB_RC_EVT_BEACON:
513 core_size = sizeof(struct uwb_rc_evt_beacon_WUSB_0100);
514 offset = offsetof(struct uwb_rc_evt_beacon_WUSB_0100,
515 wBeaconInfoLength);
516 event_size = hwarc_get_event_size(rc, rceb, core_size,
517 offset, buf_size);
518 if (event_size < 0)
519 goto out;
520 *_real_size = event_size;
521 result = hwarc_filter_evt_beacon_WUSB_0100(rc, header,
522 buf_size, _new_size);
523 break;
524 case UWB_RC_EVT_BP_SLOT_CHANGE:
525 *_new_size = *_real_size =
526 sizeof(struct uwb_rc_evt_bp_slot_change_WUSB_0100);
527 result = 0;
528 break;
529
530 case UWB_RC_EVT_DRP_AVAIL:
531 core_size = sizeof(struct uwb_rc_evt_drp_avail_WUSB_0100);
532 offset = offsetof(struct uwb_rc_evt_drp_avail_WUSB_0100,
533 wIELength);
534 event_size = hwarc_get_event_size(rc, rceb, core_size,
535 offset, buf_size);
536 if (event_size < 0)
537 goto out;
538 *_real_size = event_size;
539 result = hwarc_filter_evt_drp_avail_WUSB_0100(
540 rc, header, buf_size, _new_size);
541 break;
542
543 case UWB_RC_EVT_DRP:
544 core_size = sizeof(struct uwb_rc_evt_drp_WUSB_0100);
545 offset = offsetof(struct uwb_rc_evt_drp_WUSB_0100, wIELength);
546 event_size = hwarc_get_event_size(rc, rceb, core_size,
547 offset, buf_size);
548 if (event_size < 0)
549 goto out;
550 *_real_size = event_size;
551 result = hwarc_filter_evt_drp_WUSB_0100(rc, header,
552 buf_size, _new_size);
553 break;
554
555 default:
556 break;
557 }
558out:
559 return result;
560}
561
562/**
563 * Filter data from WUSB device to WHCI driver
564 *
565 * @header: incoming event
566 * @buf_size: size of buffer in which event arrived
567 * @_event_size: actual size of event in the buffer
568 * @_new_size: size of event after filtered
569 *
570 * Filter events based on which protocol the device supports. The WUSB
571 * errata should be the same as WHCI 0.95 so we do not filter that here -
572 * only WUSB 1.0.
573 *
574 * If we don't handle it, we return -ENOANO (why the weird error code?
575 * well, so if I get it, I can pinpoint in the code that raised
576 * it...after all, not too many places use the higher error codes).
577 */
578static
579int hwarc_filter_event(struct uwb_rc *rc, struct uwb_rceb **header,
580 const size_t buf_size, size_t *_real_size,
581 size_t *_new_size)
582{
583 int result = -ENOANO;
584 if (rc->version == 0x0100)
585 result = hwarc_filter_event_WUSB_0100(
586 rc, header, buf_size, _real_size, _new_size);
587 return result;
588}
589
590
/**
 * Execute an UWB RC command on HWA
 *
 * @uwb_rc: Instance of a Radio Controller that is a HWA
 * @cmd: Buffer containing the RCCB and payload to execute
 * @cmd_size: Size of the command buffer.
 *
 * Returns usb_control_msg()'s result: >= 0 bytes transferred or a
 * negative errno code.
 *
 * NOTE: rc's mutex has to be locked
 */
static
int hwarc_cmd(struct uwb_rc *uwb_rc, const struct uwb_rccb *cmd, size_t cmd_size)
{
	struct hwarc *hwarc = uwb_rc->priv;
	/* Ship the command with a class-specific OUT control request
	 * (WA_EXEC_RC_CMD) addressed to the RC interface. */
	return usb_control_msg(
		hwarc->usb_dev, usb_sndctrlpipe(hwarc->usb_dev, 0),
		WA_EXEC_RC_CMD, USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
		0, hwarc->usb_iface->cur_altsetting->desc.bInterfaceNumber,
		(void *) cmd, cmd_size, 100 /* FIXME: this is totally arbitrary */);
}
610
611static
612int hwarc_reset(struct uwb_rc *uwb_rc)
613{
614 struct hwarc *hwarc = uwb_rc->priv;
615 return usb_reset_device(hwarc->usb_dev);
616}
617
/**
 * Callback for the notification and event endpoint
 *
 * Checks that everything is fine and then passes the read data to
 * the notification/event handling mechanism (neh).
 *
 * On success the URB is resubmitted so the endpoint keeps being
 * polled; on transient errors it is also resubmitted until the error
 * density counter (edc) says enough is enough, at which point the
 * whole device is reset.
 */
static
void hwarc_neep_cb(struct urb *urb)
{
	struct hwarc *hwarc = urb->context;
	struct usb_interface *usb_iface = hwarc->usb_iface;
	struct device *dev = &usb_iface->dev;
	int result;

	switch (result = urb->status) {
	case 0:
		/* Data arrived: hand it to the neh layer for decoding. */
		d_printf(3, dev, "NEEP: receive stat %d, %zu bytes\n",
			 urb->status, (size_t)urb->actual_length);
		uwb_rc_neh_grok(hwarc->uwb_rc, urb->transfer_buffer,
				urb->actual_length);
		break;
	case -ECONNRESET:	/* Not an error, but a controlled situation; */
	case -ENOENT:		/* (we killed the URB)...so, no broadcast */
		d_printf(2, dev, "NEEP: URB reset/noent %d\n", urb->status);
		goto out;	/* do NOT resubmit */
	case -ESHUTDOWN:	/* going away! */
		d_printf(2, dev, "NEEP: URB down %d\n", urb->status);
		goto out;	/* do NOT resubmit */
	default:		/* On general errors, retry unless it gets ugly */
		if (edc_inc(&hwarc->neep_edc, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME))
			goto error_exceeded;
		dev_err(dev, "NEEP: URB error %d\n", urb->status);
	}
	/* Success or tolerated error: re-arm the interrupt URB.
	 * GFP_ATOMIC because we are in URB-completion context. */
	result = usb_submit_urb(urb, GFP_ATOMIC);
	d_printf(3, dev, "NEEP: submit %d\n", result);
	if (result < 0) {
		dev_err(dev, "NEEP: Can't resubmit URB (%d) resetting device\n",
			result);
		goto error;
	}
out:
	return;

error_exceeded:
	dev_err(dev, "NEEP: URB max acceptable errors "
		"exceeded, resetting device\n");
error:
	/* Tell the neh layer the stream died, then reset everything. */
	uwb_rc_neh_error(hwarc->uwb_rc, result);
	uwb_rc_reset_all(hwarc->uwb_rc);
	return;
}
670
671static void hwarc_init(struct hwarc *hwarc)
672{
673 edc_init(&hwarc->neep_edc);
674}
675
676/**
677 * Initialize the notification/event endpoint stuff
678 *
679 * Note this is effectively a parallel thread; it knows that
680 * hwarc->uwb_rc always exists because the existence of a 'hwarc'
681 * means that there is a reverence on the hwarc->uwb_rc (see
682 * _probe()), and thus _neep_cb() can execute safely.
683 */
684static int hwarc_neep_init(struct uwb_rc *rc)
685{
686 struct hwarc *hwarc = rc->priv;
687 struct usb_interface *iface = hwarc->usb_iface;
688 struct usb_device *usb_dev = interface_to_usbdev(iface);
689 struct device *dev = &iface->dev;
690 int result;
691 struct usb_endpoint_descriptor *epd;
692
693 epd = &iface->cur_altsetting->endpoint[0].desc;
694 hwarc->rd_buffer = (void *) __get_free_page(GFP_KERNEL);
695 if (hwarc->rd_buffer == NULL) {
696 dev_err(dev, "Unable to allocate notification's read buffer\n");
697 goto error_rd_buffer;
698 }
699 hwarc->neep_urb = usb_alloc_urb(0, GFP_KERNEL);
700 if (hwarc->neep_urb == NULL) {
701 dev_err(dev, "Unable to allocate notification URB\n");
702 goto error_urb_alloc;
703 }
704 usb_fill_int_urb(hwarc->neep_urb, usb_dev,
705 usb_rcvintpipe(usb_dev, epd->bEndpointAddress),
706 hwarc->rd_buffer, PAGE_SIZE,
707 hwarc_neep_cb, hwarc, epd->bInterval);
708 result = usb_submit_urb(hwarc->neep_urb, GFP_ATOMIC);
709 if (result < 0) {
710 dev_err(dev, "Cannot submit notification URB: %d\n", result);
711 goto error_neep_submit;
712 }
713 return 0;
714
715error_neep_submit:
716 usb_free_urb(hwarc->neep_urb);
717error_urb_alloc:
718 free_page((unsigned long)hwarc->rd_buffer);
719error_rd_buffer:
720 return -ENOMEM;
721}
722
723
/** Clean up all the notification endpoint resources */
static void hwarc_neep_release(struct uwb_rc *rc)
{
	struct hwarc *hwarc = rc->priv;

	/* Kill before free: ensure the URB is no longer in flight (and
	 * its completion callback finished) before releasing it and the
	 * page it reads into. Order matters here. */
	usb_kill_urb(hwarc->neep_urb);
	usb_free_urb(hwarc->neep_urb);
	free_page((unsigned long)hwarc->rd_buffer);
}
733
734/**
735 * Get the version from class-specific descriptor
736 *
737 * NOTE: this descriptor comes with the big bundled configuration
738 * descriptor that includes the interfaces' and endpoints', so
739 * we just look for it in the cached copy kept by the USB stack.
740 *
741 * NOTE2: We convert LE fields to CPU order.
742 */
743static int hwarc_get_version(struct uwb_rc *rc)
744{
745 int result;
746
747 struct hwarc *hwarc = rc->priv;
748 struct uwb_rc_control_intf_class_desc *descr;
749 struct device *dev = &rc->uwb_dev.dev;
750 struct usb_device *usb_dev = hwarc->usb_dev;
751 char *itr;
752 struct usb_descriptor_header *hdr;
753 size_t itr_size, actconfig_idx;
754 u16 version;
755
756 actconfig_idx = (usb_dev->actconfig - usb_dev->config) /
757 sizeof(usb_dev->config[0]);
758 itr = usb_dev->rawdescriptors[actconfig_idx];
759 itr_size = le16_to_cpu(usb_dev->actconfig->desc.wTotalLength);
760 while (itr_size >= sizeof(*hdr)) {
761 hdr = (struct usb_descriptor_header *) itr;
762 d_printf(3, dev, "Extra device descriptor: "
763 "type %02x/%u bytes @ %zu (%zu left)\n",
764 hdr->bDescriptorType, hdr->bLength,
765 (itr - usb_dev->rawdescriptors[actconfig_idx]),
766 itr_size);
767 if (hdr->bDescriptorType == USB_DT_CS_RADIO_CONTROL)
768 goto found;
769 itr += hdr->bLength;
770 itr_size -= hdr->bLength;
771 }
772 dev_err(dev, "cannot find Radio Control Interface Class descriptor\n");
773 return -ENODEV;
774
775found:
776 result = -EINVAL;
777 if (hdr->bLength > itr_size) { /* is it available? */
778 dev_err(dev, "incomplete Radio Control Interface Class "
779 "descriptor (%zu bytes left, %u needed)\n",
780 itr_size, hdr->bLength);
781 goto error;
782 }
783 if (hdr->bLength < sizeof(*descr)) {
784 dev_err(dev, "short Radio Control Interface Class "
785 "descriptor\n");
786 goto error;
787 }
788 descr = (struct uwb_rc_control_intf_class_desc *) hdr;
789 /* Make LE fields CPU order */
790 version = __le16_to_cpu(descr->bcdRCIVersion);
791 if (version != 0x0100) {
792 dev_err(dev, "Device reports protocol version 0x%04x. We "
793 "do not support that. \n", version);
794 result = -EINVAL;
795 goto error;
796 }
797 rc->version = version;
798 d_printf(3, dev, "Device supports WUSB protocol version 0x%04x \n",
799 rc->version);
800 result = 0;
801error:
802 return result;
803}
804
/*
 * By creating a 'uwb_rc', we have a reference on it -- that reference
 * is the one we drop when we disconnect.
 *
 * No need to switch altsettings; according to WUSB1.0[8.6.1.1], there
 * is only one altsetting allowed.
 *
 * Returns 0 on success or a negative errno code; on failure every
 * resource acquired so far is released in reverse order.
 */
static int hwarc_probe(struct usb_interface *iface,
		       const struct usb_device_id *id)
{
	int result;
	struct uwb_rc *uwb_rc;
	struct hwarc *hwarc;
	struct device *dev = &iface->dev;

	result = -ENOMEM;
	uwb_rc = uwb_rc_alloc();
	if (uwb_rc == NULL) {
		dev_err(dev, "unable to allocate RC instance\n");
		goto error_rc_alloc;
	}
	hwarc = kzalloc(sizeof(*hwarc), GFP_KERNEL);
	if (hwarc == NULL) {
		dev_err(dev, "unable to allocate HWA RC instance\n");
		goto error_alloc;
	}
	hwarc_init(hwarc);
	/* Take references on the USB device and interface; dropped in
	 * _disconnect() (or in the error unwind below). */
	hwarc->usb_dev = usb_get_dev(interface_to_usbdev(iface));
	hwarc->usb_iface = usb_get_intf(iface);
	hwarc->uwb_rc = uwb_rc;

	uwb_rc->owner = THIS_MODULE;
	uwb_rc->start = hwarc_neep_init;
	uwb_rc->stop = hwarc_neep_release;
	uwb_rc->cmd = hwarc_cmd;
	uwb_rc->reset = hwarc_reset;
	/* Devices with the WHCI quirk already speak WHCI natively, so
	 * no command/event translation is installed for them. */
	if (id->driver_info & WUSB_QUIRK_WHCI_CMD_EVT) {
		uwb_rc->filter_cmd = NULL;
		uwb_rc->filter_event = NULL;
	} else {
		uwb_rc->filter_cmd = hwarc_filter_cmd;
		uwb_rc->filter_event = hwarc_filter_event;
	}

	result = uwb_rc_add(uwb_rc, dev, hwarc);
	if (result < 0)
		goto error_rc_add;
	result = hwarc_get_version(uwb_rc);
	if (result < 0) {
		dev_err(dev, "cannot retrieve version of RC \n");
		goto error_get_version;
	}
	usb_set_intfdata(iface, hwarc);
	return 0;

error_get_version:
	uwb_rc_rm(uwb_rc);
error_rc_add:
	usb_put_intf(iface);
	usb_put_dev(hwarc->usb_dev);
error_alloc:
	uwb_rc_put(uwb_rc);
error_rc_alloc:
	return result;
}
870
/* Undo _probe(): unregister the RC, drop USB refs, free state.
 * Teardown order mirrors probe in reverse; uwb_rc_rm() stops the
 * endpoint machinery before the hwarc structure is freed. */
static void hwarc_disconnect(struct usb_interface *iface)
{
	struct hwarc *hwarc = usb_get_intfdata(iface);
	struct uwb_rc *uwb_rc = hwarc->uwb_rc;

	usb_set_intfdata(hwarc->usb_iface, NULL);
	uwb_rc_rm(uwb_rc);
	usb_put_intf(hwarc->usb_iface);
	usb_put_dev(hwarc->usb_dev);
	d_printf(1, &hwarc->usb_iface->dev, "freed hwarc %p\n", hwarc);
	kfree(hwarc);
	uwb_rc_put(uwb_rc);	/* when creating the device, refcount = 1 */
}
884
885/** USB device ID's that we handle */
886static struct usb_device_id hwarc_id_table[] = {
887 /* D-Link DUB-1210 */
888 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3d02, 0xe0, 0x01, 0x02),
889 .driver_info = WUSB_QUIRK_WHCI_CMD_EVT },
890 /* Intel i1480 (using firmware 1.3PA2-20070828) */
891 { USB_DEVICE_AND_INTERFACE_INFO(0x8086, 0x0c3b, 0xe0, 0x01, 0x02),
892 .driver_info = WUSB_QUIRK_WHCI_CMD_EVT },
893 /* Generic match for the Radio Control interface */
894 { USB_INTERFACE_INFO(0xe0, 0x01, 0x02), },
895 { },
896};
897MODULE_DEVICE_TABLE(usb, hwarc_id_table);
898
/* USB driver glue binding probe/disconnect to the RC interface IDs. */
static struct usb_driver hwarc_driver = {
	.name = "hwa-rc",
	.probe = hwarc_probe,
	.disconnect = hwarc_disconnect,
	.id_table = hwarc_id_table,
};
905
906static int __init hwarc_driver_init(void)
907{
908 int result;
909 result = usb_register(&hwarc_driver);
910 if (result < 0)
911 printk(KERN_ERR "HWA-RC: Cannot register USB driver: %d\n",
912 result);
913 return result;
914
915}
916module_init(hwarc_driver_init);
917
/* Module exit point: unregister the driver from the USB core. */
static void __exit hwarc_driver_exit(void)
{
	usb_deregister(&hwarc_driver);
}
922module_exit(hwarc_driver_exit);
923
924MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
925MODULE_DESCRIPTION("Host Wireless Adapter Radio Control Driver");
926MODULE_LICENSE("GPL");
diff --git a/drivers/uwb/i1480/Makefile b/drivers/uwb/i1480/Makefile
new file mode 100644
index 000000000000..212bbc7d4c32
--- /dev/null
+++ b/drivers/uwb/i1480/Makefile
@@ -0,0 +1,2 @@
1obj-$(CONFIG_UWB_I1480U) += dfu/ i1480-est.o
2obj-$(CONFIG_UWB_I1480U_WLP) += i1480u-wlp/
diff --git a/drivers/uwb/i1480/dfu/Makefile b/drivers/uwb/i1480/dfu/Makefile
new file mode 100644
index 000000000000..bd1b9f25424c
--- /dev/null
+++ b/drivers/uwb/i1480/dfu/Makefile
@@ -0,0 +1,9 @@
1obj-$(CONFIG_UWB_I1480U) += i1480-dfu-usb.o
2
3i1480-dfu-usb-objs := \
4 dfu.o \
5 mac.o \
6 phy.o \
7 usb.o
8
9
diff --git a/drivers/uwb/i1480/dfu/dfu.c b/drivers/uwb/i1480/dfu/dfu.c
new file mode 100644
index 000000000000..9097b3b30385
--- /dev/null
+++ b/drivers/uwb/i1480/dfu/dfu.c
@@ -0,0 +1,217 @@
1/*
2 * Intel Wireless UWB Link 1480
3 * Main driver
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * Common code for firmware upload used by the USB and PCI version;
24 * i1480_fw_upload() takes a device descriptor and uses the function
25 * pointers it provides to upload firmware and prepare the PHY.
26 *
27 * As well, provides common functions used by the rest of the code.
28 */
29#include "i1480-dfu.h"
30#include <linux/errno.h>
31#include <linux/delay.h>
32#include <linux/pci.h>
33#include <linux/device.h>
34#include <linux/uwb.h>
35#include <linux/random.h>
36
37#define D_LOCAL 0
38#include <linux/uwb/debug.h>
39
40/**
41 * i1480_rceb_check - Check RCEB for expected field values
42 * @i1480: pointer to device for which RCEB is being checked
43 * @rceb: RCEB being checked
44 * @cmd: which command the RCEB is related to
45 * @context: expected context
46 * @expected_type: expected event type
47 * @expected_event: expected event
48 *
49 * If @cmd is NULL, do not print error messages, but still return an error
50 * code.
51 *
52 * Return 0 if @rceb matches the expected values, -EINVAL otherwise.
53 */
54int i1480_rceb_check(const struct i1480 *i1480, const struct uwb_rceb *rceb,
55 const char *cmd, u8 context, u8 expected_type,
56 unsigned expected_event)
57{
58 int result = 0;
59 struct device *dev = i1480->dev;
60 if (rceb->bEventContext != context) {
61 if (cmd)
62 dev_err(dev, "%s: unexpected context id 0x%02x "
63 "(expected 0x%02x)\n", cmd,
64 rceb->bEventContext, context);
65 result = -EINVAL;
66 }
67 if (rceb->bEventType != expected_type) {
68 if (cmd)
69 dev_err(dev, "%s: unexpected event type 0x%02x "
70 "(expected 0x%02x)\n", cmd,
71 rceb->bEventType, expected_type);
72 result = -EINVAL;
73 }
74 if (le16_to_cpu(rceb->wEvent) != expected_event) {
75 if (cmd)
76 dev_err(dev, "%s: unexpected event 0x%04x "
77 "(expected 0x%04x)\n", cmd,
78 le16_to_cpu(rceb->wEvent), expected_event);
79 result = -EINVAL;
80 }
81 return result;
82}
83EXPORT_SYMBOL_GPL(i1480_rceb_check);
84
85
86/**
87 * Execute a Radio Control Command
88 *
89 * Command data has to be in i1480->cmd_buf.
90 *
91 * @returns size of the reply data filled in i1480->evt_buf or < 0 errno
92 * code on error.
93 */
94ssize_t i1480_cmd(struct i1480 *i1480, const char *cmd_name, size_t cmd_size,
95 size_t reply_size)
96{
97 ssize_t result;
98 struct uwb_rceb *reply = i1480->evt_buf;
99 struct uwb_rccb *cmd = i1480->cmd_buf;
100 u16 expected_event = reply->wEvent;
101 u8 expected_type = reply->bEventType;
102 u8 context;
103
104 d_fnstart(3, i1480->dev, "(%p, %s, %zu)\n", i1480, cmd_name, cmd_size);
105 init_completion(&i1480->evt_complete);
106 i1480->evt_result = -EINPROGRESS;
107 do {
108 get_random_bytes(&context, 1);
109 } while (context == 0x00 || context == 0xff);
110 cmd->bCommandContext = context;
111 result = i1480->cmd(i1480, cmd_name, cmd_size);
112 if (result < 0)
113 goto error;
114 /* wait for the callback to report a event was received */
115 result = wait_for_completion_interruptible_timeout(
116 &i1480->evt_complete, HZ);
117 if (result == 0) {
118 result = -ETIMEDOUT;
119 goto error;
120 }
121 if (result < 0)
122 goto error;
123 result = i1480->evt_result;
124 if (result < 0) {
125 dev_err(i1480->dev, "%s: command reply reception failed: %zd\n",
126 cmd_name, result);
127 goto error;
128 }
129 /*
130 * Firmware versions >= 1.4.12224 for IOGear GUWA100U generate a
131 * spurious notification after firmware is downloaded. So check whether
132 * the receibed RCEB is such notification before assuming that the
133 * command has failed.
134 */
135 if (i1480_rceb_check(i1480, i1480->evt_buf, NULL,
136 0, 0xfd, 0x0022) == 0) {
137 /* Now wait for the actual RCEB for this command. */
138 result = i1480->wait_init_done(i1480);
139 if (result < 0)
140 goto error;
141 result = i1480->evt_result;
142 }
143 if (result != reply_size) {
144 dev_err(i1480->dev, "%s returned only %zu bytes, %zu expected\n",
145 cmd_name, result, reply_size);
146 result = -EINVAL;
147 goto error;
148 }
149 /* Verify we got the right event in response */
150 result = i1480_rceb_check(i1480, i1480->evt_buf, cmd_name, context,
151 expected_type, expected_event);
152error:
153 d_fnend(3, i1480->dev, "(%p, %s, %zu) = %zd\n",
154 i1480, cmd_name, cmd_size, result);
155 return result;
156}
157EXPORT_SYMBOL_GPL(i1480_cmd);
158
159
160static
161int i1480_print_state(struct i1480 *i1480)
162{
163 int result;
164 u32 *buf = (u32 *) i1480->cmd_buf;
165
166 result = i1480->read(i1480, 0x80080000, 2 * sizeof(*buf));
167 if (result < 0) {
168 dev_err(i1480->dev, "cannot read U & L states: %d\n", result);
169 goto error;
170 }
171 dev_info(i1480->dev, "state U 0x%08x, L 0x%08x\n", buf[0], buf[1]);
172error:
173 return result;
174}
175
176
177/*
178 * PCI probe, firmware uploader
179 *
180 * _mac_fw_upload() will call rc_setup(), which needs an rc_release().
181 */
182int i1480_fw_upload(struct i1480 *i1480)
183{
184 int result;
185
186 result = i1480_pre_fw_upload(i1480); /* PHY pre fw */
187 if (result < 0 && result != -ENOENT) {
188 i1480_print_state(i1480);
189 goto error;
190 }
191 result = i1480_mac_fw_upload(i1480); /* MAC fw */
192 if (result < 0) {
193 if (result == -ENOENT)
194 dev_err(i1480->dev, "Cannot locate MAC FW file '%s'\n",
195 i1480->mac_fw_name);
196 else
197 i1480_print_state(i1480);
198 goto error;
199 }
200 result = i1480_phy_fw_upload(i1480); /* PHY fw */
201 if (result < 0 && result != -ENOENT) {
202 i1480_print_state(i1480);
203 goto error_rc_release;
204 }
205 /*
206 * FIXME: find some reliable way to check whether firmware is running
207 * properly. Maybe use some standard request that has no side effects?
208 */
209 dev_info(i1480->dev, "firmware uploaded successfully\n");
210error_rc_release:
211 if (i1480->rc_release)
212 i1480->rc_release(i1480);
213 result = 0;
214error:
215 return result;
216}
217EXPORT_SYMBOL_GPL(i1480_fw_upload);
diff --git a/drivers/uwb/i1480/dfu/i1480-dfu.h b/drivers/uwb/i1480/dfu/i1480-dfu.h
new file mode 100644
index 000000000000..46f45e800f36
--- /dev/null
+++ b/drivers/uwb/i1480/dfu/i1480-dfu.h
@@ -0,0 +1,260 @@
1/*
2 * i1480 Device Firmware Upload
3 *
4 * Copyright (C) 2005-2006 Intel Corporation
5 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 * 02110-1301, USA.
20 *
21 *
22 * This driver is the firmware uploader for the Intel Wireless UWB
23 * Link 1480 device (both in the USB and PCI incarnations).
24 *
25 * The process is quite simple: we stop the device, write the firmware
26 * to its memory and then restart it. Wait for the device to let us
27 * know it is done booting firmware. Ready.
28 *
29 * We might have to upload before or after a phy firmware (which might
30 * be done in two methods, using a normal firmware image or through
31 * the MPI port).
32 *
33 * Because USB and PCI use common methods, we just make ops out of the
34 * common operations (read, write, wait_init_done and cmd) and
35 * implement them in usb.c and pci.c.
36 *
37 * The flow is (some parts omitted):
38 *
39 * i1480_{usb,pci}_probe() On enumerate/discovery
40 * i1480_fw_upload()
41 * i1480_pre_fw_upload()
42 * __mac_fw_upload()
43 * fw_hdrs_load()
44 * mac_fw_hdrs_push()
45 * i1480->write() [i1480_{usb,pci}_write()]
46 * i1480_fw_cmp()
47 * i1480->read() [i1480_{usb,pci}_read()]
48 * i1480_mac_fw_upload()
49 * __mac_fw_upload()
 * i1480->setup()
51 * i1480->wait_init_done()
52 * i1480_cmd_reset()
53 * i1480->cmd() [i1480_{usb,pci}_cmd()]
54 * ...
55 * i1480_phy_fw_upload()
56 * request_firmware()
57 * i1480_mpi_write()
58 * i1480->cmd() [i1480_{usb,pci}_cmd()]
59 *
60 * Once the probe function enumerates the device and uploads the
61 * firmware, we just exit with -ENODEV, as we don't really want to
62 * attach to the device.
63 */
64#ifndef __i1480_DFU_H__
65#define __i1480_DFU_H__
66
67#include <linux/uwb/spec.h>
68#include <linux/types.h>
69#include <linux/completion.h>
70
/* Bits in the (LE) mode word selecting the firmware upload mode. */
#define i1480_FW_UPLOAD_MODE_MASK (cpu_to_le32(0x00000018))

/* Newer firmware adds a wParamLength field to its RCEBs. */
#if i1480_FW > 0x00000302
#define i1480_RCEB_EXTENDED
#endif
76
77struct uwb_rccb;
78struct uwb_rceb;
79
80/*
81 * Common firmware upload handlers
82 *
83 * Normally you embed this struct in another one specific to your hw.
84 *
85 * @write Write to device's memory from buffer.
86 * @read Read from device's memory to i1480->evt_buf.
87 * @setup Setup device after basic firmware is uploaded
88 * @wait_init_done
89 * Wait for the device to send a notification saying init
90 * is done.
91 * @cmd FOP for issuing the command to the hardware. The
92 * command data is contained in i1480->cmd_buf and the size
93 * is supplied as an argument. The command replied is put
94 * in i1480->evt_buf and the size in i1480->evt_result (or if
95 * an error, a < 0 errno code).
96 *
97 * @cmd_buf Memory buffer used to send commands to the device.
98 * Allocated by the upper layers i1480_fw_upload().
99 * Size has to be @buf_size.
100 * @evt_buf Memory buffer used to place the async notifications
101 * received by the hw. Allocated by the upper layers
102 * i1480_fw_upload().
103 * Size has to be @buf_size.
104 * @cmd_complete
 * Low level driver uses this to notify code waiting for
106 * an event that the event has arrived and data is in
107 * i1480->evt_buf (and size/result in i1480->evt_result).
108 * @hw_rev
109 * Use this value to activate dfu code to support new revisions
110 * of hardware. i1480_init() sets this to a default value.
111 * It should be updated by the USB and PCI code.
112 */
struct i1480 {
	struct device *dev;	/* for logging; owned by USB/PCI layer */

	/* Transport ops, implemented by usb.c / pci.c (see header comment) */
	int (*write)(struct i1480 *, u32 addr, const void *, size_t);
	int (*read)(struct i1480 *, u32 addr, size_t);
	int (*rc_setup)(struct i1480 *);
	void (*rc_release)(struct i1480 *);
	int (*wait_init_done)(struct i1480 *);
	int (*cmd)(struct i1480 *, const char *cmd_name, size_t cmd_size);
	/* Firmware image file names, set by the bus-specific probe */
	const char *pre_fw_name;
	const char *mac_fw_name;
	const char *mac_fw_name_deprecate;	/* FIXME: Will go away */
	const char *phy_fw_name;
	u8 hw_rev;	/* defaults to 1 in i1480_init() */

	size_t buf_size;	/* size of both evt_buf and cmd_buf */
	void *evt_buf, *cmd_buf;
	ssize_t evt_result;	/* reply size or negative errno */
	struct completion evt_complete;	/* signaled when a reply arrives */
};
133
134static inline
135void i1480_init(struct i1480 *i1480)
136{
137 i1480->hw_rev = 1;
138 init_completion(&i1480->evt_complete);
139}
140
141extern int i1480_fw_upload(struct i1480 *);
142extern int i1480_pre_fw_upload(struct i1480 *);
143extern int i1480_mac_fw_upload(struct i1480 *);
144extern int i1480_phy_fw_upload(struct i1480 *);
145extern ssize_t i1480_cmd(struct i1480 *, const char *, size_t, size_t);
146extern int i1480_rceb_check(const struct i1480 *,
147 const struct uwb_rceb *, const char *, u8,
148 u8, unsigned);
149
/* Command and event codes spoken by the i1480 firmware. */
enum {
	/* Vendor specific command type */
	i1480_CET_VS1 = 0xfd,
	/* i1480 commands */
	i1480_CMD_SET_IP_MAS = 0x000e,
	i1480_CMD_GET_MAC_PHY_INFO = 0x0003,
	i1480_CMD_MPI_WRITE = 0x000f,
	i1480_CMD_MPI_READ = 0x0010,
	/* i1480 events */
#if i1480_FW > 0x00000302
	i1480_EVT_CONFIRM = 0x0002,
	i1480_EVT_RM_INIT_DONE = 0x0101,
	i1480_EVT_DEV_ADD = 0x0103,
	i1480_EVT_DEV_RM = 0x0104,
	i1480_EVT_DEV_ID_CHANGE = 0x0105,
	i1480_EVT_GET_MAC_PHY_INFO = i1480_CMD_GET_MAC_PHY_INFO,
#else
	/* Older firmware answers GET_MAC_PHY_INFO with a plain CONFIRM
	 * event; the two branches are otherwise identical. */
	i1480_EVT_CONFIRM = 0x0002,
	i1480_EVT_RM_INIT_DONE = 0x0101,
	i1480_EVT_DEV_ADD = 0x0103,
	i1480_EVT_DEV_RM = 0x0104,
	i1480_EVT_DEV_ID_CHANGE = 0x0105,
	i1480_EVT_GET_MAC_PHY_INFO = i1480_EVT_CONFIRM,
#endif
};
175
176
/* Generic command-confirm event (wire format, hence packed). */
struct i1480_evt_confirm {
	struct uwb_rceb rceb;	/* common event header */
#ifdef i1480_RCEB_EXTENDED
	__le16 wParamLength;	/* only on newer firmware */
#endif
	u8 bResultCode;		/* command status reported by firmware */
} __attribute__((packed));
184
185
/* Bare i1480 event header (wire format, hence packed). */
struct i1480_rceb {
	struct uwb_rceb rceb;	/* common event header */
#ifdef i1480_RCEB_EXTENDED
	__le16 wParamLength;	/* only on newer firmware */
#endif
} __attribute__((packed));
192
193
/**
 * Get MAC & PHY Information confirm event structure
 *
 * Confirm event returned by the command.
 *
 * Wire format; layout differs between firmware generations, selected
 * at compile time by i1480_FW.
 */
struct i1480_evt_confirm_GMPI {
#if i1480_FW > 0x00000302
	struct uwb_rceb rceb;
	__le16 wParamLength;
	__le16 status;
	u8 mac_addr[6];		/* EUI-64 bit IEEE address [still 8 bytes?] */
	u8 dev_addr[2];
	__le16 mac_fw_rev;	/* major = v >> 8; minor = v & 0xff */
	u8 hw_rev;
	u8 phy_vendor;
	u8 phy_rev;		/* major v = >> 8; minor = v & 0xff */
	__le16 mac_caps;
	u8 phy_caps[3];
	u8 key_stores;
	__le16 mcast_addr_stores;
	u8 sec_mode_supported;
#else
	struct uwb_rceb rceb;
	u8 status;
	u8 mac_addr[8];		/* EUI-64 bit IEEE address [still 8 bytes?] */
	u8 dev_addr[2];
	__le16 mac_fw_rev;	/* major = v >> 8; minor = v & 0xff */
	__le16 phy_fw_rev;	/* major v = >> 8; minor = v & 0xff */
	__le16 mac_caps;
	u8 phy_caps;
	u8 key_stores;
	__le16 mcast_addr_stores;
	u8 sec_mode_supported;
#endif
} __attribute__((packed));
229
230
231struct i1480_cmd_mpi_write {
232 struct uwb_rccb rccb;
233 __le16 size;
234 u8 data[];
235};
236
237
/* MPI read command (wire format, hence packed). */
struct i1480_cmd_mpi_read {
	struct uwb_rccb rccb;	/* common command header */
	__le16 size;		/* number of (page, offset) entries */
	struct {
		u8 page, offset;	/* register location to read */
	} __attribute__((packed)) data[];
} __attribute__((packed));
245
246
/* Reply to an MPI read command (wire format, hence packed). */
struct i1480_evt_mpi_read {
	struct uwb_rceb rceb;	/* common event header */
#ifdef i1480_RCEB_EXTENDED
	__le16 wParamLength;	/* only on newer firmware */
#endif
	u8 bResultCode;		/* command status reported by firmware */
	__le16 size;		/* number of (page, offset, value) entries */
	struct {
		u8 page, offset, value;	/* register location and its value */
	} __attribute__((packed)) data[];
} __attribute__((packed));
258
259
260#endif /* #ifndef __i1480_DFU_H__ */
diff --git a/drivers/uwb/i1480/dfu/mac.c b/drivers/uwb/i1480/dfu/mac.c
new file mode 100644
index 000000000000..2e4d8f07c165
--- /dev/null
+++ b/drivers/uwb/i1480/dfu/mac.c
@@ -0,0 +1,527 @@
1/*
2 * Intel Wireless UWB Link 1480
3 * MAC Firmware upload implementation
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * Implementation of the code for parsing the firmware file (extract
24 * the headers and binary code chunks) in the fw_*() functions. The
25 * code to upload pre and mac firmwares is the same, so it uses a
26 * common entry point in __mac_fw_upload(), which uses the i1480
27 * function pointers to push the firmware to the device.
28 */
29#include <linux/delay.h>
30#include <linux/firmware.h>
31#include <linux/uwb.h>
32#include "i1480-dfu.h"
33
34#define D_LOCAL 0
35#include <linux/uwb/debug.h>
36
/*
 * Descriptor for a continuous segment of MAC fw data
 */
struct fw_hdr {
	unsigned long address;	/* device address this chunk is loaded at */
	size_t length;		/* payload length, in u32 words */
	const u32 *bin;		/* payload (points into the firmware blob) */
	struct fw_hdr *next;	/* next chunk in the chain, NULL at the end */
};
46
47
48/* Free a chain of firmware headers */
49static
50void fw_hdrs_free(struct fw_hdr *hdr)
51{
52 struct fw_hdr *next;
53
54 while (hdr) {
55 next = hdr->next;
56 kfree(hdr);
57 hdr = next;
58 }
59}
60
61
62/* Fill a firmware header descriptor from a memory buffer */
63static
64int fw_hdr_load(struct i1480 *i1480, struct fw_hdr *hdr, unsigned hdr_cnt,
65 const char *_data, const u32 *data_itr, const u32 *data_top)
66{
67 size_t hdr_offset = (const char *) data_itr - _data;
68 size_t remaining_size = (void *) data_top - (void *) data_itr;
69 if (data_itr + 2 > data_top) {
70 dev_err(i1480->dev, "fw hdr #%u/%zu: EOF reached in header at "
71 "offset %zu, limit %zu\n",
72 hdr_cnt, hdr_offset,
73 (const char *) data_itr + 2 - _data,
74 (const char *) data_top - _data);
75 return -EINVAL;
76 }
77 hdr->next = NULL;
78 hdr->address = le32_to_cpu(*data_itr++);
79 hdr->length = le32_to_cpu(*data_itr++);
80 hdr->bin = data_itr;
81 if (hdr->length > remaining_size) {
82 dev_err(i1480->dev, "fw hdr #%u/%zu: EOF reached in data; "
83 "chunk too long (%zu bytes), only %zu left\n",
84 hdr_cnt, hdr_offset, hdr->length, remaining_size);
85 return -EINVAL;
86 }
87 return 0;
88}
89
90
91/**
92 * Get a buffer where the firmware is supposed to be and create a
93 * chain of headers linking them together.
94 *
95 * @phdr: where to place the pointer to the first header (headers link
96 * to the next via the @hdr->next ptr); need to free the whole
97 * chain when done.
98 *
99 * @_data: Pointer to the data buffer.
100 *
101 * @_data_size: Size of the data buffer (bytes); data size has to be a
102 * multiple of 4. Function will fail if not.
103 *
104 * Goes over the whole binary blob; reads the first chunk and creates
105 * a fw hdr from it (which points to where the data is in @_data and
106 * the length of the chunk); then goes on to the next chunk until
107 * done. Each header is linked to the next.
108 */
109static
110int fw_hdrs_load(struct i1480 *i1480, struct fw_hdr **phdr,
111 const char *_data, size_t data_size)
112{
113 int result;
114 unsigned hdr_cnt = 0;
115 u32 *data = (u32 *) _data, *data_itr, *data_top;
116 struct fw_hdr *hdr, **prev_hdr = phdr;
117
118 result = -EINVAL;
119 /* Check size is ok and pointer is aligned */
120 if (data_size % sizeof(u32) != 0)
121 goto error;
122 if ((unsigned long) _data % sizeof(u16) != 0)
123 goto error;
124 *phdr = NULL;
125 data_itr = data;
126 data_top = (u32 *) (_data + data_size);
127 while (data_itr < data_top) {
128 result = -ENOMEM;
129 hdr = kmalloc(sizeof(*hdr), GFP_KERNEL);
130 if (hdr == NULL) {
131 dev_err(i1480->dev, "Cannot allocate fw header "
132 "for chunk #%u\n", hdr_cnt);
133 goto error_alloc;
134 }
135 result = fw_hdr_load(i1480, hdr, hdr_cnt,
136 _data, data_itr, data_top);
137 if (result < 0)
138 goto error_load;
139 data_itr += 2 + hdr->length;
140 *prev_hdr = hdr;
141 prev_hdr = &hdr->next;
142 hdr_cnt++;
143 };
144 *prev_hdr = NULL;
145 return 0;
146
147error_load:
148 kfree(hdr);
149error_alloc:
150 fw_hdrs_free(*phdr);
151error:
152 return result;
153}
154
155
/**
 * Compares a chunk of fw with one in the device's memory
 *
 * @i1480: Device instance
 * @hdr: Pointer to the firmware chunk
 * @returns: 0 if equal, < 0 errno on error. If > 0, it is the offset
 *   where the difference was found (plus one).
 *
 * Kind of dirty and simplistic, but does the trick in both the PCI
 * and USB version. We do a quick[er] memcmp(), and if it fails, we do
 * a byte-by-byte to find the offset.
 *
 * NOTE: 'result' is reused for three purposes in turn -- bytes read
 * by i1480->read(), then (on mismatch) the failing offset + 1, then
 * the final return code. Keep that in mind when reading the flow.
 */
static
ssize_t i1480_fw_cmp(struct i1480 *i1480, struct fw_hdr *hdr)
{
	ssize_t result = 0;
	u32 src_itr = 0, cnt;
	size_t size = hdr->length*sizeof(hdr->bin[0]);	/* length is in u32 words */
	size_t chunk_size;
	u8 *bin = (u8 *) hdr->bin;

	while (size > 0) {
		/* Read back at most one device-buffer's worth at a time */
		chunk_size = size < i1480->buf_size ? size : i1480->buf_size;
		result = i1480->read(i1480, hdr->address + src_itr, chunk_size);
		if (result < 0) {
			dev_err(i1480->dev, "error reading for verification: "
				"%zd\n", result);
			goto error;
		}
		/* Fast path: compare the whole chunk; on mismatch, walk
		 * byte by byte to locate and report the first bad one. */
		if (memcmp(i1480->cmd_buf, bin + src_itr, result)) {
			u8 *buf = i1480->cmd_buf;
			d_printf(2, i1480->dev,
				 "original data @ %p + %u, %zu bytes\n",
				 bin, src_itr, result);
			d_dump(4, i1480->dev, bin + src_itr, result);
			for (cnt = 0; cnt < result; cnt++)
				if (bin[src_itr + cnt] != buf[cnt]) {
					dev_err(i1480->dev, "byte failed at "
						"src_itr %u cnt %u [0x%02x "
						"vs 0x%02x]\n", src_itr, cnt,
						bin[src_itr + cnt], buf[cnt]);
					result = src_itr + cnt + 1;
					goto cmp_failed;
				}
		}
		/* advance by however many bytes the read returned */
		src_itr += result;
		size -= result;
	}
	result = 0;
error:
cmp_failed:
	return result;
}
209
210
/**
 * Writes firmware headers to the device.
 *
 * @i1480: device instance
 * @hdr: first chunk of the processed firmware chain
 * @fw_name: firmware file name (for messages only)
 * @fw_tag: firmware type tag, e.g. "MAC"/"PRE" (for messages only)
 * @returns: 0 if ok, < 0 errno on error.
 *
 * Each chunk is written to its device address and then read back and
 * compared (i1480_fw_cmp()). A verification mismatch re-writes the
 * same chunk, up to 3 attempts, before giving up with -EINVAL.
 */
static
int mac_fw_hdrs_push(struct i1480 *i1480, struct fw_hdr *hdr,
		     const char *fw_name, const char *fw_tag)
{
	struct device *dev = i1480->dev;
	ssize_t result = 0;
	struct fw_hdr *hdr_itr;
	int verif_retry_count;

	d_fnstart(3, dev, "(%p, %p)\n", i1480, hdr);
	/* Now, header by header, push them to the hw */
	for (hdr_itr = hdr; hdr_itr != NULL; hdr_itr = hdr_itr->next) {
		verif_retry_count = 0;
retry:
		dev_dbg(dev, "fw chunk (%zu @ 0x%08lx)\n",
			hdr_itr->length * sizeof(hdr_itr->bin[0]),
			hdr_itr->address);
		result = i1480->write(i1480, hdr_itr->address, hdr_itr->bin,
				      hdr_itr->length*sizeof(hdr_itr->bin[0]));
		if (result < 0) {
			dev_err(dev, "%s fw '%s': write failed (%zuB @ 0x%lx):"
				" %zd\n", fw_tag, fw_name,
				hdr_itr->length * sizeof(hdr_itr->bin[0]),
				hdr_itr->address, result);
			break;
		}
		/* Read back and compare; > 0 means mismatch offset + 1 */
		result = i1480_fw_cmp(i1480, hdr_itr);
		if (result < 0) {
			dev_err(dev, "%s fw '%s': verification read "
				"failed (%zuB @ 0x%lx): %zd\n",
				fw_tag, fw_name,
				hdr_itr->length * sizeof(hdr_itr->bin[0]),
				hdr_itr->address, result);
			break;
		}
		if (result > 0) {	/* Offset where it failed + 1 */
			result--;
			dev_err(dev, "%s fw '%s': WARNING: verification "
				"failed at 0x%lx: retrying\n",
				fw_tag, fw_name, hdr_itr->address + result);
			if (++verif_retry_count < 3)
				goto retry;	/* write this block again! */
			dev_err(dev, "%s fw '%s': verification failed at 0x%lx: "
				"tried %d times\n", fw_tag, fw_name,
				hdr_itr->address + result, verif_retry_count);
			result = -EINVAL;
			break;
		}
	}
	d_fnend(3, dev, "(%zd)\n", result);
	return result;
}
270
271
272/** Puts the device in firmware upload mode.*/
273static
274int mac_fw_upload_enable(struct i1480 *i1480)
275{
276 int result;
277 u32 reg = 0x800000c0;
278 u32 *buffer = (u32 *)i1480->cmd_buf;
279
280 if (i1480->hw_rev > 1)
281 reg = 0x8000d0d4;
282 result = i1480->read(i1480, reg, sizeof(u32));
283 if (result < 0)
284 goto error_cmd;
285 *buffer &= ~i1480_FW_UPLOAD_MODE_MASK;
286 result = i1480->write(i1480, reg, buffer, sizeof(u32));
287 if (result < 0)
288 goto error_cmd;
289 return 0;
290error_cmd:
291 dev_err(i1480->dev, "can't enable fw upload mode: %d\n", result);
292 return result;
293}
294
295
296/** Gets the device out of firmware upload mode. */
297static
298int mac_fw_upload_disable(struct i1480 *i1480)
299{
300 int result;
301 u32 reg = 0x800000c0;
302 u32 *buffer = (u32 *)i1480->cmd_buf;
303
304 if (i1480->hw_rev > 1)
305 reg = 0x8000d0d4;
306 result = i1480->read(i1480, reg, sizeof(u32));
307 if (result < 0)
308 goto error_cmd;
309 *buffer |= i1480_FW_UPLOAD_MODE_MASK;
310 result = i1480->write(i1480, reg, buffer, sizeof(u32));
311 if (result < 0)
312 goto error_cmd;
313 return 0;
314error_cmd:
315 dev_err(i1480->dev, "can't disable fw upload mode: %d\n", result);
316 return result;
317}
318
319
320
/**
 * Generic function for uploading a MAC firmware.
 *
 * @i1480: Device instance
 * @fw_name: Name of firmware file to upload.
 * @fw_tag: Name of the firmware type (for messages)
 *   [eg: MAC, PRE]
 * @returns: 0 if ok, < 0 errno on error. -ENOENT (firmware file not
 *   found) is deliberately not reported here; the caller decides.
 *
 * Requests the firmware file, parses it into a chunk chain
 * (fw_hdrs_load()), switches the device into upload mode, pushes all
 * chunks and switches upload mode off again. The success/failure
 * message is printed under the shared cleanup label based on the
 * sign of 'result'.
 */
static
int __mac_fw_upload(struct i1480 *i1480, const char *fw_name,
		    const char *fw_tag)
{
	int result;
	const struct firmware *fw;
	struct fw_hdr *fw_hdrs;

	d_fnstart(3, i1480->dev, "(%p, %s, %s)\n", i1480, fw_name, fw_tag);
	result = request_firmware(&fw, fw_name, i1480->dev);
	if (result < 0)	/* Up to caller to complain on -ENOENT */
		goto out;
	d_printf(3, i1480->dev, "%s fw '%s': uploading\n", fw_tag, fw_name);
	result = fw_hdrs_load(i1480, &fw_hdrs, fw->data, fw->size);
	if (result < 0) {
		dev_err(i1480->dev, "%s fw '%s': failed to parse firmware "
			"file: %d\n", fw_tag, fw_name, result);
		goto out_release;
	}
	result = mac_fw_upload_enable(i1480);
	if (result < 0)
		goto out_hdrs_release;
	result = mac_fw_hdrs_push(i1480, fw_hdrs, fw_name, fw_tag);
	/* best-effort: upload-mode exit failure is not propagated */
	mac_fw_upload_disable(i1480);
out_hdrs_release:
	if (result >= 0)
		dev_info(i1480->dev, "%s fw '%s': uploaded\n", fw_tag, fw_name);
	else
		dev_err(i1480->dev, "%s fw '%s': failed to upload (%d), "
			"power cycle device\n", fw_tag, fw_name, result);
	fw_hdrs_free(fw_hdrs);
out_release:
	release_firmware(fw);
out:
	d_fnend(3, i1480->dev, "(%p, %s, %s) = %d\n", i1480, fw_name, fw_tag,
		result);
	return result;
}
370
371
372/**
373 * Upload a pre-PHY firmware
374 *
375 */
376int i1480_pre_fw_upload(struct i1480 *i1480)
377{
378 int result;
379 result = __mac_fw_upload(i1480, i1480->pre_fw_name, "PRE");
380 if (result == 0)
381 msleep(400);
382 return result;
383}
384
385
386/**
387 * Reset a the MAC and PHY
388 *
389 * @i1480: Device's instance
390 * @returns: 0 if ok, < 0 errno code on error
391 *
392 * We put the command on kmalloc'ed memory as some arches cannot do
393 * USB from the stack. The reply event is copied from an stage buffer,
394 * so it can be in the stack. See WUSB1.0[8.6.2.4] for more details.
395 *
396 * We issue the reset to make sure the UWB controller reinits the PHY;
397 * this way we can now if the PHY init went ok.
398 */
399static
400int i1480_cmd_reset(struct i1480 *i1480)
401{
402 int result;
403 struct uwb_rccb *cmd = (void *) i1480->cmd_buf;
404 struct i1480_evt_reset {
405 struct uwb_rceb rceb;
406 u8 bResultCode;
407 } __attribute__((packed)) *reply = (void *) i1480->evt_buf;
408
409 result = -ENOMEM;
410 cmd->bCommandType = UWB_RC_CET_GENERAL;
411 cmd->wCommand = cpu_to_le16(UWB_RC_CMD_RESET);
412 reply->rceb.bEventType = UWB_RC_CET_GENERAL;
413 reply->rceb.wEvent = UWB_RC_CMD_RESET;
414 result = i1480_cmd(i1480, "RESET", sizeof(*cmd), sizeof(*reply));
415 if (result < 0)
416 goto out;
417 if (reply->bResultCode != UWB_RC_RES_SUCCESS) {
418 dev_err(i1480->dev, "RESET: command execution failed: %u\n",
419 reply->bResultCode);
420 result = -EIO;
421 }
422out:
423 return result;
424
425}
426
427
428/* Wait for the MAC FW to start running */
429static
430int i1480_fw_is_running_q(struct i1480 *i1480)
431{
432 int cnt = 0;
433 int result;
434 u32 *val = (u32 *) i1480->cmd_buf;
435
436 d_fnstart(3, i1480->dev, "(i1480 %p)\n", i1480);
437 for (cnt = 0; cnt < 10; cnt++) {
438 msleep(100);
439 result = i1480->read(i1480, 0x80080000, 4);
440 if (result < 0) {
441 dev_err(i1480->dev, "Can't read 0x8008000: %d\n", result);
442 goto out;
443 }
444 if (*val == 0x55555555UL) /* fw running? cool */
445 goto out;
446 }
447 dev_err(i1480->dev, "Timed out waiting for fw to start\n");
448 result = -ETIMEDOUT;
449out:
450 d_fnend(3, i1480->dev, "(i1480 %p) = %d\n", i1480, result);
451 return result;
452
453}
454
455
456/**
457 * Upload MAC firmware, wait for it to start
458 *
459 * @i1480: Device instance
460 * @fw_name: Name of the file that contains the firmware
461 *
462 * This has to be called after the pre fw has been uploaded (if
463 * there is any).
464 */
465int i1480_mac_fw_upload(struct i1480 *i1480)
466{
467 int result = 0, deprecated_name = 0;
468 struct i1480_rceb *rcebe = (void *) i1480->evt_buf;
469
470 d_fnstart(3, i1480->dev, "(%p)\n", i1480);
471 result = __mac_fw_upload(i1480, i1480->mac_fw_name, "MAC");
472 if (result == -ENOENT) {
473 result = __mac_fw_upload(i1480, i1480->mac_fw_name_deprecate,
474 "MAC");
475 deprecated_name = 1;
476 }
477 if (result < 0)
478 return result;
479 if (deprecated_name == 1)
480 dev_warn(i1480->dev,
481 "WARNING: firmware file name %s is deprecated, "
482 "please rename to %s\n",
483 i1480->mac_fw_name_deprecate, i1480->mac_fw_name);
484 result = i1480_fw_is_running_q(i1480);
485 if (result < 0)
486 goto error_fw_not_running;
487 result = i1480->rc_setup ? i1480->rc_setup(i1480) : 0;
488 if (result < 0) {
489 dev_err(i1480->dev, "Cannot setup after MAC fw upload: %d\n",
490 result);
491 goto error_setup;
492 }
493 result = i1480->wait_init_done(i1480); /* wait init'on */
494 if (result < 0) {
495 dev_err(i1480->dev, "MAC fw '%s': Initialization timed out "
496 "(%d)\n", i1480->mac_fw_name, result);
497 goto error_init_timeout;
498 }
499 /* verify we got the right initialization done event */
500 if (i1480->evt_result != sizeof(*rcebe)) {
501 dev_err(i1480->dev, "MAC fw '%s': initialization event returns "
502 "wrong size (%zu bytes vs %zu needed)\n",
503 i1480->mac_fw_name, i1480->evt_result, sizeof(*rcebe));
504 dump_bytes(i1480->dev, rcebe, min(i1480->evt_result, (ssize_t)32));
505 goto error_size;
506 }
507 result = -EIO;
508 if (i1480_rceb_check(i1480, &rcebe->rceb, NULL, 0, i1480_CET_VS1,
509 i1480_EVT_RM_INIT_DONE) < 0) {
510 dev_err(i1480->dev, "wrong initialization event 0x%02x/%04x/%02x "
511 "received; expected 0x%02x/%04x/00\n",
512 rcebe->rceb.bEventType, le16_to_cpu(rcebe->rceb.wEvent),
513 rcebe->rceb.bEventContext, i1480_CET_VS1,
514 i1480_EVT_RM_INIT_DONE);
515 goto error_init_timeout;
516 }
517 result = i1480_cmd_reset(i1480);
518 if (result < 0)
519 dev_err(i1480->dev, "MAC fw '%s': MBOA reset failed (%d)\n",
520 i1480->mac_fw_name, result);
521error_fw_not_running:
522error_init_timeout:
523error_size:
524error_setup:
525 d_fnend(3, i1480->dev, "(i1480 %p) = %d\n", i1480, result);
526 return result;
527}
diff --git a/drivers/uwb/i1480/dfu/phy.c b/drivers/uwb/i1480/dfu/phy.c
new file mode 100644
index 000000000000..3b1a87de8e63
--- /dev/null
+++ b/drivers/uwb/i1480/dfu/phy.c
@@ -0,0 +1,203 @@
1/*
2 * Intel Wireless UWB Link 1480
3 * PHY parameters upload
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * Code for uploading the PHY parameters to the PHY through the UWB
24 * Radio Control interface.
25 *
26 * We just send the data through the MPI interface using HWA-like
27 * commands and then reset the PHY to make sure it is ok.
28 */
29#include <linux/delay.h>
30#include <linux/device.h>
31#include <linux/firmware.h>
32#include <linux/usb/wusb.h>
33#include "i1480-dfu.h"
34
35
36/**
37 * Write a value array to an address of the MPI interface
38 *
39 * @i1480: Device descriptor
40 * @data: Data array to write
41 * @size: Size of the data array
42 * @returns: 0 if ok, < 0 errno code on error.
43 *
44 * The data array is organized into pairs:
45 *
46 * ADDRESS VALUE
47 *
48 * ADDRESS is BE 16 bit unsigned, VALUE 8 bit unsigned. Size thus has
49 * to be a multiple of three.
50 */
51static
52int i1480_mpi_write(struct i1480 *i1480, const void *data, size_t size)
53{
54 int result;
55 struct i1480_cmd_mpi_write *cmd = i1480->cmd_buf;
56 struct i1480_evt_confirm *reply = i1480->evt_buf;
57
58 BUG_ON(size > 480);
59 result = -ENOMEM;
60 cmd->rccb.bCommandType = i1480_CET_VS1;
61 cmd->rccb.wCommand = cpu_to_le16(i1480_CMD_MPI_WRITE);
62 cmd->size = cpu_to_le16(size);
63 memcpy(cmd->data, data, size);
64 reply->rceb.bEventType = i1480_CET_VS1;
65 reply->rceb.wEvent = i1480_CMD_MPI_WRITE;
66 result = i1480_cmd(i1480, "MPI-WRITE", sizeof(*cmd) + size, sizeof(*reply));
67 if (result < 0)
68 goto out;
69 if (reply->bResultCode != UWB_RC_RES_SUCCESS) {
70 dev_err(i1480->dev, "MPI-WRITE: command execution failed: %d\n",
71 reply->bResultCode);
72 result = -EIO;
73 }
74out:
75 return result;
76}
77
78
79/**
80 * Read a value array to from an address of the MPI interface
81 *
82 * @i1480: Device descriptor
83 * @data: where to place the read array
84 * @srcaddr: Where to read from
85 * @size: Size of the data read array
86 * @returns: 0 if ok, < 0 errno code on error.
87 *
88 * The command data array is organized into pairs ADDR0 ADDR1..., and
89 * the returned data in ADDR0 VALUE0 ADDR1 VALUE1...
90 *
91 * We generate the command array to be a sequential read and then
92 * rearrange the result.
93 *
94 * We use the i1480->cmd_buf for the command, i1480->evt_buf for the reply.
95 *
96 * As the reply has to fit in 512 bytes (i1480->evt_buffer), the max amount
97 * of values we can read is (512 - sizeof(*reply)) / 3
98 */
99static
100int i1480_mpi_read(struct i1480 *i1480, u8 *data, u16 srcaddr, size_t size)
101{
102 int result;
103 struct i1480_cmd_mpi_read *cmd = i1480->cmd_buf;
104 struct i1480_evt_mpi_read *reply = i1480->evt_buf;
105 unsigned cnt;
106
107 memset(i1480->cmd_buf, 0x69, 512);
108 memset(i1480->evt_buf, 0x69, 512);
109
110 BUG_ON(size > (i1480->buf_size - sizeof(*reply)) / 3);
111 result = -ENOMEM;
112 cmd->rccb.bCommandType = i1480_CET_VS1;
113 cmd->rccb.wCommand = cpu_to_le16(i1480_CMD_MPI_READ);
114 cmd->size = cpu_to_le16(3*size);
115 for (cnt = 0; cnt < size; cnt++) {
116 cmd->data[cnt].page = (srcaddr + cnt) >> 8;
117 cmd->data[cnt].offset = (srcaddr + cnt) & 0xff;
118 }
119 reply->rceb.bEventType = i1480_CET_VS1;
120 reply->rceb.wEvent = i1480_CMD_MPI_READ;
121 result = i1480_cmd(i1480, "MPI-READ", sizeof(*cmd) + 2*size,
122 sizeof(*reply) + 3*size);
123 if (result < 0)
124 goto out;
125 if (reply->bResultCode != UWB_RC_RES_SUCCESS) {
126 dev_err(i1480->dev, "MPI-READ: command execution failed: %d\n",
127 reply->bResultCode);
128 result = -EIO;
129 }
130 for (cnt = 0; cnt < size; cnt++) {
131 if (reply->data[cnt].page != (srcaddr + cnt) >> 8)
132 dev_err(i1480->dev, "MPI-READ: page inconsistency at "
133 "index %u: expected 0x%02x, got 0x%02x\n", cnt,
134 (srcaddr + cnt) >> 8, reply->data[cnt].page);
135 if (reply->data[cnt].offset != ((srcaddr + cnt) & 0x00ff))
136 dev_err(i1480->dev, "MPI-READ: offset inconsistency at "
137 "index %u: expected 0x%02x, got 0x%02x\n", cnt,
138 (srcaddr + cnt) & 0x00ff,
139 reply->data[cnt].offset);
140 data[cnt] = reply->data[cnt].value;
141 }
142 result = 0;
143out:
144 return result;
145}
146
147
/**
 * Upload a PHY firmware, wait for it to start
 *
 * @i1480: Device instance
 * @returns: 0 if ok, < 0 errno on error.
 *
 * We assume the MAC fw is up and running. This means we can use the
 * MPI interface to write the PHY firmware. Once done, we check the
 * PHY status register; if it reads back 0, the PHY initialized
 * correctly.
 *
 * Max packet size for the MPI write is 512, so the max buffer is 480
 * (which gives us 160 byte triads of MSB, LSB and VAL for the data).
 */
int i1480_phy_fw_upload(struct i1480 *i1480)
{
	int result;
	const struct firmware *fw;
	const char *data_itr, *data_top;
	const size_t MAX_BLK_SIZE = 480;	/* 160 triads */
	size_t data_size;
	u8 phy_stat;

	result = request_firmware(&fw, i1480->phy_fw_name, i1480->dev);
	if (result < 0)
		goto out;
	/* Loop writing data in chunks as big as possible until done. */
	for (data_itr = fw->data, data_top = data_itr + fw->size;
	     data_itr < data_top; data_itr += MAX_BLK_SIZE) {
		data_size = min(MAX_BLK_SIZE, (size_t) (data_top - data_itr));
		result = i1480_mpi_write(i1480, data_itr, data_size);
		if (result < 0)
			goto error_mpi_write;
	}
	/* Read MPI page 0, offset 6; if 0, PHY was initialized correctly. */
	result = i1480_mpi_read(i1480, &phy_stat, 0x0006, 1);
	if (result < 0) {
		dev_err(i1480->dev, "PHY: can't get status: %d\n", result);
		goto error_mpi_status;
	}
	if (phy_stat != 0) {
		result = -ENODEV;
		dev_info(i1480->dev, "error, PHY not ready: %u\n", phy_stat);
		goto error_phy_status;
	}
	dev_info(i1480->dev, "PHY fw '%s': uploaded\n", i1480->phy_fw_name);
	/* shared cleanup: failure message is gated on 'result' below */
error_phy_status:
error_mpi_status:
error_mpi_write:
	release_firmware(fw);
	if (result < 0)
		dev_err(i1480->dev, "PHY fw '%s': failed to upload (%d), "
			"power cycle device\n", i1480->phy_fw_name, result);
out:
	return result;
}
diff --git a/drivers/uwb/i1480/dfu/usb.c b/drivers/uwb/i1480/dfu/usb.c
new file mode 100644
index 000000000000..98eeeff051aa
--- /dev/null
+++ b/drivers/uwb/i1480/dfu/usb.c
@@ -0,0 +1,500 @@
1/*
2 * Intel Wireless UWB Link 1480
3 * USB SKU firmware upload implementation
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * This driver will prepare the i1480 device to behave as a real
24 * Wireless USB HWA adaptor by uploading the firmware.
25 *
26 * When the device is connected or driver is loaded, i1480_usb_probe()
27 * is called--this will allocate and initialize the device structure,
28 * fill in the pointers to the common functions (read, write,
29 * wait_init_done and cmd for HWA command execution) and once that is
30 * done, call the common firmware uploading routine. Then clean up and
31 * return -ENODEV, as we don't attach to the device.
32 *
33 * The rest are the basic ops we implement that the fw upload code
34 * uses to do its job. All the ops in the common code are i1480->NAME,
35 * the functions are i1480_usb_NAME().
36 */
37#include <linux/module.h>
38#include <linux/version.h>
39#include <linux/usb.h>
40#include <linux/interrupt.h>
41#include <linux/delay.h>
42#include <linux/uwb.h>
43#include <linux/usb/wusb.h>
44#include <linux/usb/wusb-wa.h>
45#include "i1480-dfu.h"
46
47#define D_LOCAL 0
48#include <linux/uwb/debug.h>
49
50
/*
 * USB-flavored i1480 instance: wraps the common i1480 descriptor with
 * the USB device/interface handles and the URB used to listen on the
 * notification/event endpoint.
 */
struct i1480_usb {
	struct i1480 i1480;
	struct usb_device *usb_dev;
	struct usb_interface *usb_iface;
	struct urb *neep_urb;	/* URB for reading from EP1 */
};
57
58
/* Initialize the embedded common i1480 descriptor. */
static
void i1480_usb_init(struct i1480_usb *i1480_usb)
{
	i1480_init(&i1480_usb->i1480);
}
64
65
/*
 * Bind the USB device/interface and allocate the NEEP URB.
 *
 * Takes references on the USB device and the interface and stashes
 * @i1480_usb as the interface's driver data; all of it is undone on
 * error (and by i1480_usb_destroy() on the normal path).
 *
 * @returns: 0 if ok, -ENOMEM if the URB cannot be allocated.
 */
static
int i1480_usb_create(struct i1480_usb *i1480_usb, struct usb_interface *iface)
{
	struct usb_device *usb_dev = interface_to_usbdev(iface);
	int result = -ENOMEM;

	i1480_usb->usb_dev = usb_get_dev(usb_dev);	/* bind the USB device */
	i1480_usb->usb_iface = usb_get_intf(iface);
	usb_set_intfdata(iface, i1480_usb);	/* Bind the driver to iface0 */
	i1480_usb->neep_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (i1480_usb->neep_urb == NULL)
		goto error;
	return 0;

error:
	/* unwind in reverse order of acquisition */
	usb_set_intfdata(iface, NULL);
	usb_put_intf(iface);
	usb_put_dev(usb_dev);
	return result;
}
86
87
/*
 * Undo i1480_usb_create(): kill and free the NEEP URB, then drop the
 * interface data binding and the device/interface references.
 */
static
void i1480_usb_destroy(struct i1480_usb *i1480_usb)
{
	usb_kill_urb(i1480_usb->neep_urb);
	usb_free_urb(i1480_usb->neep_urb);
	usb_set_intfdata(i1480_usb->usb_iface, NULL);
	usb_put_intf(i1480_usb->usb_iface);
	usb_put_dev(i1480_usb->usb_dev);
}
97
98
/**
 * Write a buffer to a memory address in the i1480 device
 *
 * @i1480: i1480 instance
 * @memory_address:
 *         Address where to write the data buffer to.
 * @buffer: Buffer to the data
 * @size:  Size of the buffer [has to be < 512].
 * @returns: < 0 errno code on error; on success the (positive)
 *         result of the last control transfer. Callers only test
 *         for < 0.
 *
 * Data buffers to USB cannot be on the stack or in vmalloc'ed areas,
 * so we copy it to the local i1480 buffer before proceeding. In any
 * case, we have a max size we can send, soooo.
 */
static
int i1480_usb_write(struct i1480 *i1480, u32 memory_address,
		    const void *buffer, size_t size)
{
	int result = 0;
	struct i1480_usb *i1480_usb = container_of(i1480, struct i1480_usb, i1480);
	size_t buffer_size, itr = 0;

	d_fnstart(3, i1480->dev, "(%p, 0x%08x, %p, %zu)\n",
		  i1480, memory_address, buffer, size);
	BUG_ON(size & 0x3); /* Needs to be a multiple of 4 */
	while (size > 0) {
		/* bounce at most one device-buffer's worth per transfer */
		buffer_size = size < i1480->buf_size ? size : i1480->buf_size;
		memcpy(i1480->cmd_buf, buffer + itr, buffer_size);
		/* vendor request 0xf0; the target address is split over
		 * wValue (low 16 bits) and wIndex (high 16 bits) */
		result = usb_control_msg(
			i1480_usb->usb_dev, usb_sndctrlpipe(i1480_usb->usb_dev, 0),
			0xf0, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			cpu_to_le16(memory_address & 0xffff),
			cpu_to_le16((memory_address >> 16) & 0xffff),
			i1480->cmd_buf, buffer_size, 100 /* FIXME: arbitrary */);
		if (result < 0)
			break;
		d_printf(3, i1480->dev,
			 "wrote @ 0x%08x %u bytes (of %zu bytes requested)\n",
			 memory_address, result, buffer_size);
		d_dump(4, i1480->dev, i1480->cmd_buf, result);
		/* advance by however many bytes actually went out */
		itr += result;
		memory_address += result;
		size -= result;
	}
	d_fnend(3, i1480->dev, "(%p, 0x%08x, %p, %zu) = %d\n",
		i1480, memory_address, buffer, size, result);
	return result;
}
147
148
149/**
150 * Read a block [max size 512] of the device's memory to @i1480's buffer.
151 *
152 * @i1480: i1480 instance
153 * @memory_address:
154 * Address where to read from.
155 * @size: Size to read. Smaller than or equal to 512.
156 * @returns: >= 0 number of bytes written if ok, < 0 errno code on error.
157 *
158 * NOTE: if the memory address or block is incorrect, you might get a
159 * stall or a different memory read. Caller has to verify the
160 * memory address and size passed back in the @neh structure.
161 */
162static
163int i1480_usb_read(struct i1480 *i1480, u32 addr, size_t size)
164{
165 ssize_t result = 0, bytes = 0;
166 size_t itr, read_size = i1480->buf_size;
167 struct i1480_usb *i1480_usb = container_of(i1480, struct i1480_usb, i1480);
168
169 d_fnstart(3, i1480->dev, "(%p, 0x%08x, %zu)\n",
170 i1480, addr, size);
171 BUG_ON(size > i1480->buf_size);
172 BUG_ON(size & 0x3); /* Needs to be a multiple of 4 */
173 BUG_ON(read_size > 512);
174
175 if (addr >= 0x8000d200 && addr < 0x8000d400) /* Yeah, HW quirk */
176 read_size = 4;
177
178 for (itr = 0; itr < size; itr += read_size) {
179 size_t itr_addr = addr + itr;
180 size_t itr_size = min(read_size, size - itr);
181 result = usb_control_msg(
182 i1480_usb->usb_dev, usb_rcvctrlpipe(i1480_usb->usb_dev, 0),
183 0xf0, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
184 cpu_to_le16(itr_addr & 0xffff),
185 cpu_to_le16((itr_addr >> 16) & 0xffff),
186 i1480->cmd_buf + itr, itr_size,
187 100 /* FIXME: arbitrary */);
188 if (result < 0) {
189 dev_err(i1480->dev, "%s: USB read error: %zd\n",
190 __func__, result);
191 goto out;
192 }
193 if (result != itr_size) {
194 result = -EIO;
195 dev_err(i1480->dev,
196 "%s: partial read got only %zu bytes vs %zu expected\n",
197 __func__, result, itr_size);
198 goto out;
199 }
200 bytes += result;
201 }
202 result = bytes;
203out:
204 d_fnend(3, i1480->dev, "(%p, 0x%08x, %zu) = %zd\n",
205 i1480, addr, size, result);
206 if (result > 0)
207 d_dump(4, i1480->dev, i1480->cmd_buf, result);
208 return result;
209}
210
211
212/**
213 * Callback for reads on the notification/event endpoint
214 *
215 * Just enables the completion read handler.
216 */
217static
218void i1480_usb_neep_cb(struct urb *urb)
219{
220 struct i1480 *i1480 = urb->context;
221 struct device *dev = i1480->dev;
222
223 switch (urb->status) {
224 case 0:
225 break;
226 case -ECONNRESET: /* Not an error, but a controlled situation; */
227 case -ENOENT: /* (we killed the URB)...so, no broadcast */
228 dev_dbg(dev, "NEEP: reset/noent %d\n", urb->status);
229 break;
230 case -ESHUTDOWN: /* going away! */
231 dev_dbg(dev, "NEEP: down %d\n", urb->status);
232 break;
233 default:
234 dev_err(dev, "NEEP: unknown status %d\n", urb->status);
235 break;
236 }
237 i1480->evt_result = urb->actual_length;
238 complete(&i1480->evt_complete);
239 return;
240}
241
242
/**
 * Wait for the MAC FW to initialize
 *
 * @i1480: Device instance
 * @returns: 0 if the notification arrived, < 0 errno on error
 *   (-ETIMEDOUT after 1s, or the submit/wait error).
 *
 * MAC FW sends a 0xfd/0101/00 notification to EP1 when done
 * initializing. Get that notification into i1480->evt_buf; upper layer
 * will verify it.
 *
 * Set i1480->evt_result with the result of getting the event or its
 * size (if successful).
 *
 * Delivers the data directly to i1480->evt_buf
 */
static
int i1480_usb_wait_init_done(struct i1480 *i1480)
{
	int result;
	struct device *dev = i1480->dev;
	struct i1480_usb *i1480_usb = container_of(i1480, struct i1480_usb, i1480);
	struct usb_endpoint_descriptor *epd;

	d_fnstart(3, dev, "(%p)\n", i1480);
	init_completion(&i1480->evt_complete);
	i1480->evt_result = -EINPROGRESS;
	/* post an interrupt read on the first endpoint of the current
	 * altsetting; i1480_usb_neep_cb() completes evt_complete */
	epd = &i1480_usb->usb_iface->cur_altsetting->endpoint[0].desc;
	usb_fill_int_urb(i1480_usb->neep_urb, i1480_usb->usb_dev,
			 usb_rcvintpipe(i1480_usb->usb_dev, epd->bEndpointAddress),
			 i1480->evt_buf, i1480->buf_size,
			 i1480_usb_neep_cb, i1480, epd->bInterval);
	result = usb_submit_urb(i1480_usb->neep_urb, GFP_KERNEL);
	if (result < 0) {
		dev_err(dev, "init done: cannot submit NEEP read: %d\n",
			result);
		goto error_submit;
	}
	/* Wait for the USB callback to get the data */
	result = wait_for_completion_interruptible_timeout(
		&i1480->evt_complete, HZ);
	if (result <= 0) {
		/* 0 means the timeout elapsed with no completion */
		result = result == 0 ? -ETIMEDOUT : result;
		goto error_wait;
	}
	usb_kill_urb(i1480_usb->neep_urb);
	d_fnend(3, dev, "(%p) = 0\n", i1480);
	return 0;

error_wait:
	usb_kill_urb(i1480_usb->neep_urb);
error_submit:
	i1480->evt_result = result;
	d_fnend(3, dev, "(%p) = %d\n", i1480, result);
	return result;
}
295
296
/**
 * Generic function for issuing commands to the i1480
 *
 * @i1480:    i1480 instance
 * @cmd_name: Name of the command (for error messages)
 * @cmd_size: Size of the command already placed in i1480->cmd_buf
 * @returns:  >= 0 if ok (bytes transferred on EP0),
 *            < 0 errno code on error.
 *
 * Arms a read on the notification/event endpoint (the reply event is
 * delivered asynchronously into i1480->evt_buf by i1480_usb_neep_cb())
 * and then pushes the command from i1480->cmd_buf to the device over
 * EP0. The caller waits on i1480->evt_complete and validates the
 * reply; this function does not wait for it.
 */
static
int i1480_usb_cmd(struct i1480 *i1480, const char *cmd_name, size_t cmd_size)
{
	int result;
	struct device *dev = i1480->dev;
	struct i1480_usb *i1480_usb = container_of(i1480, struct i1480_usb, i1480);
	struct usb_endpoint_descriptor *epd;
	struct uwb_rccb *cmd = i1480->cmd_buf;
	u8 iface_no;

	d_fnstart(3, dev, "(%p, %s, %zu)\n", i1480, cmd_name, cmd_size);
	/* Post a read on the notification & event endpoint */
	iface_no = i1480_usb->usb_iface->cur_altsetting->desc.bInterfaceNumber;
	epd = &i1480_usb->usb_iface->cur_altsetting->endpoint[0].desc;
	usb_fill_int_urb(
		i1480_usb->neep_urb, i1480_usb->usb_dev,
		usb_rcvintpipe(i1480_usb->usb_dev, epd->bEndpointAddress),
		i1480->evt_buf, i1480->buf_size,
		i1480_usb_neep_cb, i1480, epd->bInterval);
	result = usb_submit_urb(i1480_usb->neep_urb, GFP_KERNEL);
	if (result < 0) {
		dev_err(dev, "%s: cannot submit NEEP read: %d\n",
			cmd_name, result);
		goto error_submit_ep1;
	}
	/* Now post the command on EP0 */
	result = usb_control_msg(
		i1480_usb->usb_dev, usb_sndctrlpipe(i1480_usb->usb_dev, 0),
		WA_EXEC_RC_CMD,
		USB_DIR_OUT | USB_RECIP_INTERFACE | USB_TYPE_CLASS,
		0, iface_no,
		cmd, cmd_size,
		100 /* FIXME: this is totally arbitrary */);
	if (result < 0) {
		dev_err(dev, "%s: control request failed: %d\n",
			cmd_name, result);
		goto error_submit_ep0;
	}
	d_fnend(3, dev, "(%p, %s, %zu) = %d\n",
		i1480, cmd_name, cmd_size, result);
	return result;

error_submit_ep0:
	/* The command never went out, so take down the NEEP read we
	 * armed above; no reply can arrive. */
	usb_kill_urb(i1480_usb->neep_urb);
error_submit_ep1:
	d_fnend(3, dev, "(%p, %s, %zu) = %d\n",
		i1480, cmd_name, cmd_size, result);
	return result;
}
362
363
/*
 * Probe a i1480 device for uploading firmware.
 *
 * We attach only to interface #0, which is the radio control interface.
 *
 * The binding is deliberately transient: after a successful firmware
 * upload the device is reset (so it reenumerates with the new
 * firmware) and we return -ENODEV so this driver never stays bound.
 */
static
int i1480_usb_probe(struct usb_interface *iface, const struct usb_device_id *id)
{
	struct i1480_usb *i1480_usb;
	struct i1480 *i1480;
	struct device *dev = &iface->dev;
	int result;

	result = -ENODEV;
	if (iface->cur_altsetting->desc.bInterfaceNumber != 0) {
		dev_dbg(dev, "not attaching to iface %d\n",
			iface->cur_altsetting->desc.bInterfaceNumber);
		goto error;
	}
	if (iface->num_altsetting > 1
	    && interface_to_usbdev(iface)->descriptor.idProduct == 0xbabe) {
		/* Need altsetting #1 [HW QUIRK] or EP1 won't work */
		result = usb_set_interface(interface_to_usbdev(iface), 0, 1);
		if (result < 0)
			dev_warn(dev,
				 "can't set altsetting 1 on iface 0: %d\n",
				 result);
	}

	result = -ENOMEM;
	i1480_usb = kzalloc(sizeof(*i1480_usb), GFP_KERNEL);
	if (i1480_usb == NULL) {
		dev_err(dev, "Unable to allocate instance\n");
		goto error;
	}
	i1480_usb_init(i1480_usb);

	i1480 = &i1480_usb->i1480;
	i1480->buf_size = 512;
	/* One allocation split in two halves: cmd_buf first, evt_buf
	 * right after it (buf_size bytes each); freed as one below. */
	i1480->cmd_buf = kmalloc(2 * i1480->buf_size, GFP_KERNEL);
	if (i1480->cmd_buf == NULL) {
		dev_err(dev, "Cannot allocate transfer buffers\n");
		result = -ENOMEM;
		goto error_buf_alloc;
	}
	i1480->evt_buf = i1480->cmd_buf + i1480->buf_size;

	result = i1480_usb_create(i1480_usb, iface);
	if (result < 0) {
		dev_err(dev, "Cannot create instance: %d\n", result);
		goto error_create;
	}

	/* setup the fops and upload the firmware */
	i1480->pre_fw_name = "i1480-pre-phy-0.0.bin";
	i1480->mac_fw_name = "i1480-usb-0.0.bin";
	i1480->mac_fw_name_deprecate = "ptc-0.0.bin";
	i1480->phy_fw_name = "i1480-phy-0.0.bin";
	i1480->dev = &iface->dev;
	i1480->write = i1480_usb_write;
	i1480->read = i1480_usb_read;
	i1480->rc_setup = NULL;
	i1480->wait_init_done = i1480_usb_wait_init_done;
	i1480->cmd = i1480_usb_cmd;

	result = i1480_fw_upload(&i1480_usb->i1480);	/* the real thing */
	if (result >= 0) {
		/* Upload worked: reset the device and refuse the bind
		 * on purpose (success path still returns an error). */
		usb_reset_device(i1480_usb->usb_dev);
		result = -ENODEV;	/* we don't want to bind to the iface */
	}
	i1480_usb_destroy(i1480_usb);
error_create:
	kfree(i1480->cmd_buf);
error_buf_alloc:
	kfree(i1480_usb);
error:
	return result;
}
442
/*
 * ID-table entry matching a specific VID:PID plus the all-0xff
 * (vendor specific) device and interface class/subclass/protocol
 * triplets, so we only bind to the DFU personality of the device.
 */
#define i1480_USB_DEV(v, p)				\
{							\
	.match_flags = USB_DEVICE_ID_MATCH_DEVICE	\
		| USB_DEVICE_ID_MATCH_DEV_INFO		\
		| USB_DEVICE_ID_MATCH_INT_INFO,		\
	.idVendor = (v),				\
	.idProduct = (p),				\
	.bDeviceClass = 0xff,				\
	.bDeviceSubClass = 0xff,			\
	.bDeviceProtocol = 0xff,			\
	.bInterfaceClass = 0xff,			\
	.bInterfaceSubClass = 0xff,			\
	.bInterfaceProtocol = 0xff,			\
}
457
458
/** USB device ID's that we handle */
static struct usb_device_id i1480_usb_id_table[] = {
	i1480_USB_DEV(0x8086, 0xdf3b),
	i1480_USB_DEV(0x15a9, 0x0005),
	i1480_USB_DEV(0x07d1, 0x3802),
	i1480_USB_DEV(0x050d, 0x305a),
	i1480_USB_DEV(0x3495, 0x3007),
	{},	/* terminator */
};
MODULE_DEVICE_TABLE(usb, i1480_usb_id_table);
469
470
/*
 * No disconnect callback on purpose: probe never keeps the interface
 * bound (it returns -ENODEV even after a successful upload, see
 * i1480_usb_probe()), so there is nothing to undo at disconnect time.
 */
static struct usb_driver i1480_dfu_driver = {
	.name =		"i1480-dfu-usb",
	.id_table =	i1480_usb_id_table,
	.probe =	i1480_usb_probe,
	.disconnect =	NULL,
};
477
478
/*
 * Initialize the i1480 DFU driver.
 *
 * Just registers the USB driver; the event-size tables used to parse
 * device notifications are registered by a separate module
 * (i1480-est).
 */
static int __init i1480_dfu_driver_init(void)
{
	return usb_register(&i1480_dfu_driver);
}
module_init(i1480_dfu_driver_init);
489
490
/* Undo i1480_dfu_driver_init() on module unload. */
static void __exit i1480_dfu_driver_exit(void)
{
	usb_deregister(&i1480_dfu_driver);
}
module_exit(i1480_dfu_driver_exit);
496
497
498MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
499MODULE_DESCRIPTION("Intel Wireless UWB Link 1480 firmware uploader for USB");
500MODULE_LICENSE("GPL");
diff --git a/drivers/uwb/i1480/i1480-est.c b/drivers/uwb/i1480/i1480-est.c
new file mode 100644
index 000000000000..7bf8c6febae7
--- /dev/null
+++ b/drivers/uwb/i1480/i1480-est.c
@@ -0,0 +1,99 @@
1/*
2 * Intel Wireless UWB Link 1480
3 * Event Size tables for Wired Adaptors
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * FIXME: docs
24 */
25
26#include <linux/init.h>
27#include <linux/module.h>
28#include <linux/usb.h>
29#include <linux/uwb.h>
30#include "dfu/i1480-dfu.h"
31
32
/** Event size table for wEvents 0x00XX (indexed by the event's low byte) */
static struct uwb_est_entry i1480_est_fd00[] = {
	/* Anybody expecting this response has to use
	 * neh->extra_size to specify the real size that will
	 * come back. */
	[i1480_EVT_CONFIRM] = { .size = sizeof(struct i1480_evt_confirm) },
	[i1480_CMD_SET_IP_MAS] = { .size = sizeof(struct i1480_evt_confirm) },
#ifdef i1480_RCEB_EXTENDED
	/* Extended RCEB: payload length is read from wParamLength,
	 * located at the given offset inside the event. */
	[0x09] = {
		.size = sizeof(struct i1480_rceb),
		.offset = 1 + offsetof(struct i1480_rceb, wParamLength),
	},
#endif
};
47
/** Event size table for wEvents 0x01XX (indexed by the event's low byte) */
static struct uwb_est_entry i1480_est_fd01[] = {
	[0xff & i1480_EVT_RM_INIT_DONE] = { .size = sizeof(struct i1480_rceb) },
	[0xff & i1480_EVT_DEV_ADD] = { .size = sizeof(struct i1480_rceb) + 9 },
	[0xff & i1480_EVT_DEV_RM] = { .size = sizeof(struct i1480_rceb) + 9 },
	[0xff & i1480_EVT_DEV_ID_CHANGE] = {
		.size = sizeof(struct i1480_rceb) + 2 },
};
56
57static int i1480_est_init(void)
58{
59 int result = uwb_est_register(i1480_CET_VS1, 0x00, 0x8086, 0x0c3b,
60 i1480_est_fd00,
61 ARRAY_SIZE(i1480_est_fd00));
62 if (result < 0) {
63 printk(KERN_ERR "Can't register EST table fd00: %d\n", result);
64 return result;
65 }
66 result = uwb_est_register(i1480_CET_VS1, 0x01, 0x8086, 0x0c3b,
67 i1480_est_fd01, ARRAY_SIZE(i1480_est_fd01));
68 if (result < 0) {
69 printk(KERN_ERR "Can't register EST table fd01: %d\n", result);
70 return result;
71 }
72 return 0;
73}
74module_init(i1480_est_init);
75
/* Unregister both event size tables (mirror of i1480_est_init()). */
static void i1480_est_exit(void)
{
	uwb_est_unregister(i1480_CET_VS1, 0x00, 0x8086, 0x0c3b,
			   i1480_est_fd00, ARRAY_SIZE(i1480_est_fd00));
	uwb_est_unregister(i1480_CET_VS1, 0x01, 0x8086, 0x0c3b,
			   i1480_est_fd01, ARRAY_SIZE(i1480_est_fd01));
}
module_exit(i1480_est_exit);
84
85MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
86MODULE_DESCRIPTION("i1480's Vendor Specific Event Size Tables");
87MODULE_LICENSE("GPL");
88
/**
 * USB device ID's that we handle
 *
 * [so we are loaded when this kind of device is connected]
 */
static struct usb_device_id i1480_est_id_table[] = {
	{ USB_DEVICE(0x8086, 0xdf3b), },
	{ USB_DEVICE(0x8086, 0x0c3b), },
	{ },	/* terminator */
};
MODULE_DEVICE_TABLE(usb, i1480_est_id_table);
diff --git a/drivers/uwb/i1480/i1480-wlp.h b/drivers/uwb/i1480/i1480-wlp.h
new file mode 100644
index 000000000000..18a8b0e4567b
--- /dev/null
+++ b/drivers/uwb/i1480/i1480-wlp.h
@@ -0,0 +1,200 @@
1/*
2 * Intel 1480 Wireless UWB Link
3 * WLP specific definitions
4 *
5 *
6 * Copyright (C) 2005-2006 Intel Corporation
7 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License version
11 * 2 as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21 * 02110-1301, USA.
22 *
23 *
24 * FIXME: docs
25 */
26
27#ifndef __i1480_wlp_h__
28#define __i1480_wlp_h__
29
30#include <linux/spinlock.h>
31#include <linux/list.h>
32#include <linux/uwb.h>
33#include <linux/if_ether.h>
34#include <asm/byteorder.h>
35
36/* New simplified header format? */
37#undef WLP_HDR_FMT_2 /* FIXME: rename */
38
/**
 * Values of the Delivery ID & Type field when PCA or DRP
 *
 * The Delivery ID & Type field in the WLP TX header indicates whether
 * the frame is PCA or DRP. This is done based on the high bit of
 * this 4-bit field (WLP_DRP == 1 << 3).
 * We use this constant to test if the traffic is PCA or DRP as follows:
 * if (wlp_tx_hdr_delivery_id_type(wlp_tx_hdr) & WLP_DRP)
 *   this is DRP traffic
 * else
 *   this is PCA traffic
 */
enum deliver_id_type_bit {
	WLP_DRP = 8,
};
54
/**
 * WLP TX header
 *
 * Indicates UWB/WLP-specific transmission parameters for a network
 * packet. Multi-byte fields are little endian (on-the-wire format).
 */
struct wlp_tx_hdr {
	/* dword 0 */
	struct uwb_dev_addr dstaddr;
	u8 key_index;
	/* bits 0-3: delivery ID & type; bits 4-6: ACK policy;
	 * bit 7: RTS/CTS — see the wlp_tx_hdr_*() accessors below */
	u8 mac_params;
	/* dword 1 */
	/* bits 0-3: PHY rate; bits 4-7: TX power (see accessors) */
	u8 phy_params;
#ifndef WLP_HDR_FMT_2
	u8 reserved;
	__le16 oui01; /* FIXME: not so sure if __le16 or u8[2] */
	/* dword 2 */
	u8 oui2;	/* if all LE, it could be merged */
	__le16 prid;
#endif
} __attribute__((packed));
76
/* Delivery ID & Type: bits 0-3 of mac_params (test against WLP_DRP). */
static inline int wlp_tx_hdr_delivery_id_type(const struct wlp_tx_hdr *hdr)
{
	return hdr->mac_params & 0x0f;
}
81
/* ACK policy: bits 4-6 of mac_params (an enum uwb_ack_pol value). */
static inline int wlp_tx_hdr_ack_policy(const struct wlp_tx_hdr *hdr)
{
	return (hdr->mac_params >> 4) & 0x07;
}
86
/* RTS/CTS flag: bit 7 of mac_params. */
static inline int wlp_tx_hdr_rts_cts(const struct wlp_tx_hdr *hdr)
{
	return (hdr->mac_params >> 7) & 0x01;
}
91
/* Set bits 0-3 of mac_params; @id must already fit in the low nibble. */
static inline void wlp_tx_hdr_set_delivery_id_type(struct wlp_tx_hdr *hdr, int id)
{
	hdr->mac_params = (hdr->mac_params & ~0x0f) | id;
}
96
/* Set the ACK policy (bits 4-6 of mac_params). */
static inline void wlp_tx_hdr_set_ack_policy(struct wlp_tx_hdr *hdr,
					     enum uwb_ack_pol policy)
{
	hdr->mac_params = (hdr->mac_params & ~0x70) | (policy << 4);
}
102
/* Set the RTS/CTS flag (bit 7 of mac_params); @rts_cts must be 0 or 1. */
static inline void wlp_tx_hdr_set_rts_cts(struct wlp_tx_hdr *hdr, int rts_cts)
{
	hdr->mac_params = (hdr->mac_params & ~0x80) | (rts_cts << 7);
}
107
/* PHY rate: bits 0-3 of phy_params. */
static inline enum uwb_phy_rate wlp_tx_hdr_phy_rate(const struct wlp_tx_hdr *hdr)
{
	return hdr->phy_params & 0x0f;
}
112
/* TX power: bits 4-7 of phy_params. */
static inline int wlp_tx_hdr_tx_power(const struct wlp_tx_hdr *hdr)
{
	return (hdr->phy_params >> 4) & 0x0f;
}
117
/* Set the PHY rate (bits 0-3 of phy_params). */
static inline void wlp_tx_hdr_set_phy_rate(struct wlp_tx_hdr *hdr, enum uwb_phy_rate rate)
{
	hdr->phy_params = (hdr->phy_params & ~0x0f) | rate;
}
122
/* Set the TX power (bits 4-7 of phy_params); @pwr must fit in 4 bits. */
static inline void wlp_tx_hdr_set_tx_power(struct wlp_tx_hdr *hdr, int pwr)
{
	hdr->phy_params = (hdr->phy_params & ~0xf0) | (pwr << 4);
}
127
128
/**
 * WLP RX header
 *
 * Provides UWB/WLP-specific per-packet information (addresses, link
 * quality and signal strength) for a received network packet.
 */
struct wlp_rx_hdr {
	/* dword 0 */
	struct uwb_dev_addr dstaddr;
	struct uwb_dev_addr srcaddr;
	/* dword 1 */
	u8 LQI;		/* link quality indicator */
	s8 RSSI;	/* received signal strength (signed) */
	u8 reserved3;
#ifndef WLP_HDR_FMT_2
	u8 oui0;
	/* dword 2 */
	__le16 oui12;
	__le16 prid;
#endif
} __attribute__((packed));
150
151
/** User configurable options for WLP (exposed through sysfs) */
struct wlp_options {
	struct mutex mutex;		/* access to user configurable options */
	struct wlp_tx_hdr def_tx_hdr;	/* default tx hdr */
	u8 pca_base_priority;
	u8 bw_alloc;	/* index into bw_allocs[] for PCA/DRP reservations */
};
159
160
/* Initialize user-configurable WLP options to their defaults. */
static inline
void wlp_options_init(struct wlp_options *options)
{
	mutex_init(&options->mutex);
	/* NOTE(review): the set_* helpers read-modify-write def_tx_hdr,
	 * so this assumes *options was zero-initialized by the caller
	 * — TODO confirm at the call sites. */
	wlp_tx_hdr_set_ack_policy(&options->def_tx_hdr, UWB_ACK_INM);
	wlp_tx_hdr_set_rts_cts(&options->def_tx_hdr, 1);
	/* FIXME: default to phy caps */
	wlp_tx_hdr_set_phy_rate(&options->def_tx_hdr, UWB_PHY_RATE_480);
#ifndef WLP_HDR_FMT_2
	options->def_tx_hdr.prid = cpu_to_le16(0x0000);
#endif
}
173
174
175/* sysfs helpers */
176
177extern ssize_t uwb_pca_base_priority_store(struct wlp_options *,
178 const char *, size_t);
179extern ssize_t uwb_pca_base_priority_show(const struct wlp_options *, char *);
180extern ssize_t uwb_bw_alloc_store(struct wlp_options *, const char *, size_t);
181extern ssize_t uwb_bw_alloc_show(const struct wlp_options *, char *);
182extern ssize_t uwb_ack_policy_store(struct wlp_options *,
183 const char *, size_t);
184extern ssize_t uwb_ack_policy_show(const struct wlp_options *, char *);
185extern ssize_t uwb_rts_cts_store(struct wlp_options *, const char *, size_t);
186extern ssize_t uwb_rts_cts_show(const struct wlp_options *, char *);
187extern ssize_t uwb_phy_rate_store(struct wlp_options *, const char *, size_t);
188extern ssize_t uwb_phy_rate_show(const struct wlp_options *, char *);
189
190
/** Simple bandwidth allocation (temporary and too simple) */
struct wlp_bw_allocs {
	const char *name;	/* human readable name of the allocation */
	struct {
		u8 mask, stream;
	} tx, rx;		/* per-direction MAS mask and stream index */
};
198
199
200#endif /* #ifndef __i1480_wlp_h__ */
diff --git a/drivers/uwb/i1480/i1480u-wlp/Makefile b/drivers/uwb/i1480/i1480u-wlp/Makefile
new file mode 100644
index 000000000000..fe6709b8e68b
--- /dev/null
+++ b/drivers/uwb/i1480/i1480u-wlp/Makefile
@@ -0,0 +1,8 @@
1obj-$(CONFIG_UWB_I1480U_WLP) += i1480u-wlp.o
2
3i1480u-wlp-objs := \
4 lc.o \
5 netdev.o \
6 rx.o \
7 sysfs.o \
8 tx.o
diff --git a/drivers/uwb/i1480/i1480u-wlp/i1480u-wlp.h b/drivers/uwb/i1480/i1480u-wlp/i1480u-wlp.h
new file mode 100644
index 000000000000..5f1b2951bb83
--- /dev/null
+++ b/drivers/uwb/i1480/i1480u-wlp/i1480u-wlp.h
@@ -0,0 +1,284 @@
1/*
2 * Intel 1480 Wireless UWB Link USB
3 * Header formats, constants, general internal interfaces
4 *
5 *
6 * Copyright (C) 2005-2006 Intel Corporation
7 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License version
11 * 2 as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21 * 02110-1301, USA.
22 *
23 *
 * This is not a standard interface.
25 *
26 * FIXME: docs
27 *
28 * i1480u-wlp is pretty simple: two endpoints, one for tx, one for
29 * rx. rx is polled. Network packets (ethernet, whatever) are wrapped
30 * in i1480 TX or RX headers (for sending over the air), and these
31 * packets are wrapped in UNTD headers (for sending to the WLP UWB
32 * controller).
33 *
34 * UNTD packets (UNTD hdr + i1480 hdr + network packet) packets
35 * cannot be bigger than i1480u_MAX_FRG_SIZE. When this happens, the
36 * i1480 packet is broken in chunks/packets:
37 *
38 * UNTD-1st.hdr + i1480.hdr + payload
39 * UNTD-next.hdr + payload
40 * ...
41 * UNTD-last.hdr + payload
42 *
43 * so that each packet is smaller or equal than i1480u_MAX_FRG_SIZE.
44 *
45 * All HW structures and bitmaps are little endian, so we need to play
46 * ugly tricks when defining bitfields. Hoping for the day GCC
47 * implements __attribute__((endian(1234))).
48 *
49 * FIXME: ROADMAP to the whole implementation
50 */
51
52#ifndef __i1480u_wlp_h__
53#define __i1480u_wlp_h__
54
55#include <linux/usb.h>
56#include <linux/netdevice.h>
57#include <linux/uwb.h> /* struct uwb_rc, struct uwb_notifs_handler */
58#include <linux/wlp.h>
59#include "../i1480-wlp.h"
60
61#undef i1480u_FLOW_CONTROL /* Enable flow control code */
62
/**
 * Basic flow control defaults
 *
 * Stop the TX queue when i1480u_TX_INFLIGHT_MAX URBs are outstanding;
 * restart it once the count drops to i1480u_TX_INFLIGHT_THRESHOLD
 * (see struct i1480u_tx_inflight).
 */
enum {
	i1480u_TX_INFLIGHT_MAX = 1000,
	i1480u_TX_INFLIGHT_THRESHOLD = 100,
};
70
/** Maximum size of a transaction that we can tx/rx */
enum {
	/* Maximum packet size computed as follows: max UNTD header (8) +
	 * i1480 RX header (8) + max Ethernet header and payload (4096) +
	 * Padding added by skb_reserve (2) to make post Ethernet payload
	 * start on 16 byte boundary*/
	i1480u_MAX_RX_PKT_SIZE = 4114,
	i1480u_MAX_FRG_SIZE = 512,	/* fragment above this size */
	i1480u_RX_BUFS = 9,		/* number of concurrent RX buffers */
};
81
82
/**
 * UNTD packet type (the 2-bit fragment role carried in untd_hdr.type)
 *
 * We need to fragment any payload whose UNTD packet is going to be
 * bigger than i1480u_MAX_FRG_SIZE.
 */
enum i1480u_pkt_type {
	i1480u_PKT_FRAG_1ST = 0x1,	/* first fragment */
	i1480u_PKT_FRAG_NXT = 0x0,	/* middle fragment */
	i1480u_PKT_FRAG_LST = 0x2,	/* last fragment */
	i1480u_PKT_FRAG_CMP = 0x3	/* complete, unfragmented packet */
};
/* Out-of-band sentinel: cannot appear on the wire since untd_hdr_type()
 * only extracts 2 bits (values 0-3). */
enum {
	i1480u_PKT_NONE = 0x4,
};
98
/** USB Network Transfer Descriptor - common */
struct untd_hdr {
	/* bits 0-1: i1480u_pkt_type; bit 2: rx/tx direction flag
	 * (see the untd_hdr_*() accessors below) */
	u8 type;
	__le16 len;	/* meaning depends on packet type, see subtypes */
} __attribute__((packed));
104
/* Packet type: bits 0-1 of the UNTD type byte. */
static inline enum i1480u_pkt_type untd_hdr_type(const struct untd_hdr *hdr)
{
	return hdr->type & 0x03;
}
109
/* RX/TX direction flag: bit 2 of the UNTD type byte. */
static inline int untd_hdr_rx_tx(const struct untd_hdr *hdr)
{
	return (hdr->type >> 2) & 0x01;
}
114
/* Set the packet type (bits 0-1), preserving the other bits. */
static inline void untd_hdr_set_type(struct untd_hdr *hdr, enum i1480u_pkt_type type)
{
	hdr->type = (hdr->type & ~0x03) | type;
}
119
/* Set the RX/TX flag (bit 2); @rx_tx must be 0 or 1. */
static inline void untd_hdr_set_rx_tx(struct untd_hdr *hdr, int rx_tx)
{
	hdr->type = (hdr->type & ~0x04) | (rx_tx << 2);
}
124
125
/**
 * USB Network Transfer Descriptor - Complete Packet
 *
 * This is for a packet that is smaller (header + payload) than
 * i1480u_MAX_FRG_SIZE.
 *
 * @hdr.total_len is the size of the payload; the payload doesn't
 * count this header nor the padding, but includes the size of i1480
 * header.
 */
struct untd_hdr_cmp {
	struct untd_hdr hdr;
	u8 padding;
} __attribute__((packed));
140
141
/**
 * USB Network Transfer Descriptor - First fragment
 *
 * @hdr.len is the size of the *whole packet* (excluding UNTD
 * headers); @fragment_len is the size of this fragment's payload
 * (excluding UNTD headers, but including i1480 headers).
 */
struct untd_hdr_1st {
	struct untd_hdr hdr;
	__le16 fragment_len;
	u8 padding[3];
} __attribute__((packed));
154
155
/**
 * USB Network Transfer Descriptor - Next / Last [Rest]
 *
 * @hdr.len is the size of the payload, not including headers.
 */
struct untd_hdr_rst {
	struct untd_hdr hdr;
	u8 padding;
} __attribute__((packed));
165
166
/**
 * Transmission context
 *
 * Wraps all the stuff needed to track a pending/active tx
 * operation.
 */
struct i1480u_tx {
	struct list_head list_node;	/* presumably i1480u->tx_list — confirm in tx.c */
	struct i1480u *i1480u;		/* owning interface instance */
	struct urb *urb;		/* in-flight USB transfer */

	struct sk_buff *skb;		/* network packet being sent */
	struct wlp_tx_hdr *wlp_tx_hdr;

	void *buf;	/* if NULL, no new buf was used */
	size_t buf_size;
};
184
/**
 * Basic flow control
 *
 * We maintain a basic flow control counter. "count" is how many TX
 * URBs are outstanding. Only allow "max" TX URBs to be outstanding;
 * if this value is reached the queue will be stopped. The queue will
 * be restarted when there are "threshold" URBs outstanding.
 * Maintain a counter of how many times the TX queue needed to be
 * restarted due to the "max" being exceeded and the "threshold"
 * reached again. The timestamp "restart_ts" is to keep track from
 * when the counter was last queried (see sysfs handling of file
 * wlp_tx_inflight).
 */
struct i1480u_tx_inflight {
	atomic_t count;			/* TX URBs currently outstanding */
	unsigned long max;		/* stop queue at this many */
	unsigned long threshold;	/* restart queue at this many */
	unsigned long restart_ts;	/* jiffies of last counter query */
	atomic_t restart_count;		/* times the queue was restarted */
};
205
/**
 * Instance of a i1480u WLP interface
 *
 * Keeps references to the USB device that wraps it, as well as its
 * interface and associated UWB host controller. As well, it also
 * keeps a link to the netdevice for integration into the networking
 * stack.
 * We maintain separate error history for the tx and rx endpoints because
 * the implementation does not rely on locking - having one shared
 * structure between endpoints may cause problems. Adding locking to the
 * implementation will have higher cost than adding a separate structure.
 */
struct i1480u {
	struct usb_device *usb_dev;
	struct usb_interface *usb_iface;
	struct net_device *net_dev;

	spinlock_t lock;
	struct net_device_stats stats;

	/* RX context handling */
	struct sk_buff *rx_skb;		/* packet being reassembled */
	struct uwb_dev_addr rx_srcaddr;
	size_t rx_untd_pkt_size;
	struct i1480u_rx_buf {
		struct i1480u *i1480u;	/* back pointer */
		struct urb *urb;
		struct sk_buff *data;	/* i1480u_MAX_RX_PKT_SIZE each */
	} rx_buf[i1480u_RX_BUFS];	/* N bufs */

	spinlock_t tx_list_lock;	/* TX context */
	struct list_head tx_list;
	u8 tx_stream;

	struct stats lqe_stats, rssi_stats;	/* radio statistics */

	/* Options we can set from sysfs */
	struct wlp_options options;
	struct uwb_notifs_handler uwb_notifs_handler;
	struct edc tx_errors;		/* per-endpoint error density */
	struct edc rx_errors;
	struct wlp wlp;			/* embedded WLP substack instance */
#ifdef i1480u_FLOW_CONTROL
	struct urb *notif_urb;
	struct edc notif_edc;		/* error density counter */
	u8 notif_buffer[1];		/* xon/xoff byte from the device */
#endif
	struct i1480u_tx_inflight tx_inflight;
};
255
256/* Internal interfaces */
257extern void i1480u_rx_cb(struct urb *urb);
258extern int i1480u_rx_setup(struct i1480u *);
259extern void i1480u_rx_release(struct i1480u *);
260extern void i1480u_tx_release(struct i1480u *);
261extern int i1480u_xmit_frame(struct wlp *, struct sk_buff *,
262 struct uwb_dev_addr *);
263extern void i1480u_stop_queue(struct wlp *);
264extern void i1480u_start_queue(struct wlp *);
265extern int i1480u_sysfs_setup(struct i1480u *);
266extern void i1480u_sysfs_release(struct i1480u *);
267
268/* netdev interface */
269extern int i1480u_open(struct net_device *);
270extern int i1480u_stop(struct net_device *);
271extern int i1480u_hard_start_xmit(struct sk_buff *, struct net_device *);
272extern void i1480u_tx_timeout(struct net_device *);
273extern int i1480u_set_config(struct net_device *, struct ifmap *);
274extern struct net_device_stats *i1480u_get_stats(struct net_device *);
275extern int i1480u_change_mtu(struct net_device *, int);
276extern void i1480u_uwb_notifs_cb(void *, struct uwb_dev *, enum uwb_notifs);
277
278/* bandwidth allocation callback */
279extern void i1480u_bw_alloc_cb(struct uwb_rsv *);
280
281/* Sys FS */
282extern struct attribute_group i1480u_wlp_attr_group;
283
284#endif /* #ifndef __i1480u_wlp_h__ */
diff --git a/drivers/uwb/i1480/i1480u-wlp/lc.c b/drivers/uwb/i1480/i1480u-wlp/lc.c
new file mode 100644
index 000000000000..737d60cd5b73
--- /dev/null
+++ b/drivers/uwb/i1480/i1480u-wlp/lc.c
@@ -0,0 +1,421 @@
1/*
2 * WUSB Wire Adapter: WLP interface
3 * Driver for the Linux Network stack.
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * FIXME: docs
24 *
25 * This implements a very simple network driver for the WLP USB
26 * device that is associated to a UWB (Ultra Wide Band) host.
27 *
28 * This is seen as an interface of a composite device. Once the UWB
29 * host has an association to another WLP capable device, the
30 * networking interface (aka WLP) can start to send packets back and
31 * forth.
32 *
33 * Limitations:
34 *
35 * - Hand cranked; can't ifup the interface until there is an association
36 *
37 * - BW allocation very simplistic [see i1480u_mas_set() and callees].
38 *
39 *
40 * ROADMAP:
41 *
42 * ENTRY POINTS (driver model):
43 *
44 * i1480u_driver_{exit,init}(): initialization of the driver.
45 *
46 * i1480u_probe(): called by the driver code when a device
47 * matching 'i1480u_id_table' is connected.
48 *
49 * This allocs a netdev instance, inits with
50 * i1480u_add(), then registers_netdev().
51 * i1480u_init()
52 * i1480u_add()
53 *
54 * i1480u_disconnect(): device has been disconnected/module
55 * is being removed.
56 * i1480u_rm()
57 */
58#include <linux/version.h>
59#include <linux/if_arp.h>
60#include <linux/etherdevice.h>
61#include <linux/uwb/debug.h>
62#include "i1480u-wlp.h"
63
64
65
/*
 * Initialize a freshly allocated struct i1480u: locks, TX list,
 * options, error counters, radio statistics and the embedded WLP
 * instance. No I/O is done here.
 */
static inline
void i1480u_init(struct i1480u *i1480u)
{
	spin_lock_init(&i1480u->lock);
	INIT_LIST_HEAD(&i1480u->tx_list);
	spin_lock_init(&i1480u->tx_list_lock);
	wlp_options_init(&i1480u->options);
	edc_init(&i1480u->tx_errors);
	edc_init(&i1480u->rx_errors);
#ifdef i1480u_FLOW_CONTROL
	edc_init(&i1480u->notif_edc);
#endif
	stats_init(&i1480u->lqe_stats);
	stats_init(&i1480u->rssi_stats);
	wlp_init(&i1480u->wlp);
}
83
/**
 * Fill WLP device information structure
 *
 * The structure will contain a few character arrays, each ending with a
 * null terminated string. Each string has to fit (excluding terminating
 * character) into a specified range obtained from the WLP substack.
 *
 * It is still not clear exactly how this device information should be
 * obtained. Until we find out we use the USB device descriptor as backup;
 * some information elements have intuitive mappings, others not.
 */
static
void i1480u_fill_device_info(struct wlp *wlp, struct wlp_device_info *dev_info)
{
	struct i1480u *i1480u = container_of(wlp, struct i1480u, wlp);
	struct usb_device *usb_dev = i1480u->usb_dev;
	/* Treat device name and model name the same */
	if (usb_dev->descriptor.iProduct) {
		usb_string(usb_dev, usb_dev->descriptor.iProduct,
			   dev_info->name, sizeof(dev_info->name));
		usb_string(usb_dev, usb_dev->descriptor.iProduct,
			   dev_info->model_name, sizeof(dev_info->model_name));
	}
	if (usb_dev->descriptor.iManufacturer)
		usb_string(usb_dev, usb_dev->descriptor.iManufacturer,
			   dev_info->manufacturer,
			   sizeof(dev_info->manufacturer));
	/* Model number: the device's BCD release number, printed in hex */
	scnprintf(dev_info->model_nr, sizeof(dev_info->model_nr), "%04x",
		  __le16_to_cpu(usb_dev->descriptor.bcdDevice));
	if (usb_dev->descriptor.iSerialNumber)
		usb_string(usb_dev, usb_dev->descriptor.iSerialNumber,
			   dev_info->serial, sizeof(dev_info->serial));
	/* FIXME: where should we obtain category? */
	dev_info->prim_dev_type.category = cpu_to_le16(WLP_DEV_CAT_OTHER);
	/* FIXME: Complete OUI and OUIsubdiv attributes */
}
120
#ifdef i1480u_FLOW_CONTROL
/**
 * Callback for the notification endpoint
 *
 * This mostly controls the xon/xoff protocol. In case of hard error,
 * we stop the queue. If not, we always retry.
 *
 * 'N' means XOFF (stop the TX queue), 'A' means XON (restart it); any
 * other status is retried up to the EDC error budget, after which the
 * whole device is reset.
 */
static
void i1480u_notif_cb(struct urb *urb, struct pt_regs *regs)
{
	struct i1480u *i1480u = urb->context;
	struct usb_interface *usb_iface = i1480u->usb_iface;
	struct device *dev = &usb_iface->dev;
	int result;

	switch (urb->status) {
	case 0:		/* Got valid data, do xon/xoff */
		switch (i1480u->notif_buffer[0]) {
		case 'N':
			dev_err(dev, "XOFF STOPPING queue at %lu\n", jiffies);
			netif_stop_queue(i1480u->net_dev);
			break;
		case 'A':
			dev_err(dev, "XON STARTING queue at %lu\n", jiffies);
			netif_start_queue(i1480u->net_dev);
			break;
		default:
			dev_err(dev, "NEP: unknown data 0x%02hhx\n",
				i1480u->notif_buffer[0]);
		}
		break;
	case -ECONNRESET:	/* Controlled situation ... */
	case -ENOENT:		/* we killed the URB... */
		dev_err(dev, "NEP: URB reset/noent %d\n", urb->status);
		goto error;
	case -ESHUTDOWN:	/* going away! */
		dev_err(dev, "NEP: URB down %d\n", urb->status);
		goto error;
	default:		/* Retry unless it gets ugly */
		if (edc_inc(&i1480u->notif_edc, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "NEP: URB max acceptable errors "
				"exceeded; resetting device\n");
			goto error_reset;
		}
		dev_err(dev, "NEP: URB error %d\n", urb->status);
		break;
	}
	result = usb_submit_urb(urb, GFP_ATOMIC);
	if (result < 0) {
		dev_err(dev, "NEP: Can't resubmit URB: %d; resetting device\n",
			result);
		goto error_reset;
	}
	return;

error_reset:
	/* Fix: original read "wlp_reset_all(&i1480-wlp)", which does not
	 * compile (this #ifdef branch was never built). */
	wlp_reset_all(&i1480u->wlp);
error:
	netif_stop_queue(i1480u->net_dev);
	return;
}
#endif
184
185static
186int i1480u_add(struct i1480u *i1480u, struct usb_interface *iface)
187{
188 int result = -ENODEV;
189 struct wlp *wlp = &i1480u->wlp;
190 struct usb_device *usb_dev = interface_to_usbdev(iface);
191 struct net_device *net_dev = i1480u->net_dev;
192 struct uwb_rc *rc;
193 struct uwb_dev *uwb_dev;
194#ifdef i1480u_FLOW_CONTROL
195 struct usb_endpoint_descriptor *epd;
196#endif
197
198 i1480u->usb_dev = usb_get_dev(usb_dev);
199 i1480u->usb_iface = iface;
200 rc = uwb_rc_get_by_grandpa(&i1480u->usb_dev->dev);
201 if (rc == NULL) {
202 dev_err(&iface->dev, "Cannot get associated UWB Radio "
203 "Controller\n");
204 goto out;
205 }
206 wlp->xmit_frame = i1480u_xmit_frame;
207 wlp->fill_device_info = i1480u_fill_device_info;
208 wlp->stop_queue = i1480u_stop_queue;
209 wlp->start_queue = i1480u_start_queue;
210 result = wlp_setup(wlp, rc);
211 if (result < 0) {
212 dev_err(&iface->dev, "Cannot setup WLP\n");
213 goto error_wlp_setup;
214 }
215 result = 0;
216 ether_setup(net_dev); /* make it an etherdevice */
217 uwb_dev = &rc->uwb_dev;
218 /* FIXME: hookup address change notifications? */
219
220 memcpy(net_dev->dev_addr, uwb_dev->mac_addr.data,
221 sizeof(net_dev->dev_addr));
222
223 net_dev->hard_header_len = sizeof(struct untd_hdr_cmp)
224 + sizeof(struct wlp_tx_hdr)
225 + WLP_DATA_HLEN
226 + ETH_HLEN;
227 net_dev->mtu = 3500;
228 net_dev->tx_queue_len = 20; /* FIXME: maybe use 1000? */
229
230/* net_dev->flags &= ~IFF_BROADCAST; FIXME: BUG in firmware */
231 /* FIXME: multicast disabled */
232 net_dev->flags &= ~IFF_MULTICAST;
233 net_dev->features &= ~NETIF_F_SG;
234 net_dev->features &= ~NETIF_F_FRAGLIST;
235 /* All NETIF_F_*_CSUM disabled */
236 net_dev->features |= NETIF_F_HIGHDMA;
237 net_dev->watchdog_timeo = 5*HZ; /* FIXME: a better default? */
238
239 net_dev->open = i1480u_open;
240 net_dev->stop = i1480u_stop;
241 net_dev->hard_start_xmit = i1480u_hard_start_xmit;
242 net_dev->tx_timeout = i1480u_tx_timeout;
243 net_dev->get_stats = i1480u_get_stats;
244 net_dev->set_config = i1480u_set_config;
245 net_dev->change_mtu = i1480u_change_mtu;
246
247#ifdef i1480u_FLOW_CONTROL
248 /* Notification endpoint setup (submitted when we open the device) */
249 i1480u->notif_urb = usb_alloc_urb(0, GFP_KERNEL);
250 if (i1480u->notif_urb == NULL) {
251 dev_err(&iface->dev, "Unable to allocate notification URB\n");
252 result = -ENOMEM;
253 goto error_urb_alloc;
254 }
255 epd = &iface->cur_altsetting->endpoint[0].desc;
256 usb_fill_int_urb(i1480u->notif_urb, usb_dev,
257 usb_rcvintpipe(usb_dev, epd->bEndpointAddress),
258 i1480u->notif_buffer, sizeof(i1480u->notif_buffer),
259 i1480u_notif_cb, i1480u, epd->bInterval);
260
261#endif
262
263 i1480u->tx_inflight.max = i1480u_TX_INFLIGHT_MAX;
264 i1480u->tx_inflight.threshold = i1480u_TX_INFLIGHT_THRESHOLD;
265 i1480u->tx_inflight.restart_ts = jiffies;
266 usb_set_intfdata(iface, i1480u);
267 return result;
268
269#ifdef i1480u_FLOW_CONTROL
270error_urb_alloc:
271#endif
272 wlp_remove(wlp);
273error_wlp_setup:
274 uwb_rc_put(rc);
275out:
276 usb_put_dev(i1480u->usb_dev);
277 return result;
278}
279
/*
 * Undo i1480u_add(): detach from the interface, tear down WLP and drop
 * the references taken on the radio controller and the USB device.
 *
 * @rc is sampled before wlp_remove() because that call tears down the
 * wlp instance that holds the pointer.
 */
static void i1480u_rm(struct i1480u *i1480u)
{
	struct uwb_rc *rc = i1480u->wlp.rc;
	usb_set_intfdata(i1480u->usb_iface, NULL);
#ifdef i1480u_FLOW_CONTROL
	/* Stop and release the notification URB set up in i1480u_add() */
	usb_kill_urb(i1480u->notif_urb);
	usb_free_urb(i1480u->notif_urb);
#endif
	wlp_remove(&i1480u->wlp);
	uwb_rc_put(rc);			/* ref from uwb_rc_get_by_grandpa() */
	usb_put_dev(i1480u->usb_dev);	/* ref from usb_get_dev() */
}
292
293/** Just setup @net_dev's i1480u private data */
294static void i1480u_netdev_setup(struct net_device *net_dev)
295{
296 struct i1480u *i1480u = netdev_priv(net_dev);
297 /* Initialize @i1480u */
298 memset(i1480u, 0, sizeof(*i1480u));
299 i1480u_init(i1480u);
300}
301
302/**
303 * Probe a i1480u interface and register it
304 *
305 * @iface: USB interface to link to
306 * @id: USB class/subclass/protocol id
307 * @returns: 0 if ok, < 0 errno code on error.
308 *
309 * Does basic housekeeping stuff and then allocs a netdev with space
310 * for the i1480u data. Initializes, registers in i1480u, registers in
311 * netdev, ready to go.
312 */
313static int i1480u_probe(struct usb_interface *iface,
314 const struct usb_device_id *id)
315{
316 int result;
317 struct net_device *net_dev;
318 struct device *dev = &iface->dev;
319 struct i1480u *i1480u;
320
321 /* Allocate instance [calls i1480u_netdev_setup() on it] */
322 result = -ENOMEM;
323 net_dev = alloc_netdev(sizeof(*i1480u), "wlp%d", i1480u_netdev_setup);
324 if (net_dev == NULL) {
325 dev_err(dev, "no memory for network device instance\n");
326 goto error_alloc_netdev;
327 }
328 SET_NETDEV_DEV(net_dev, dev);
329 i1480u = netdev_priv(net_dev);
330 i1480u->net_dev = net_dev;
331 result = i1480u_add(i1480u, iface); /* Now setup all the wlp stuff */
332 if (result < 0) {
333 dev_err(dev, "cannot add i1480u device: %d\n", result);
334 goto error_i1480u_add;
335 }
336 result = register_netdev(net_dev); /* Okey dokey, bring it up */
337 if (result < 0) {
338 dev_err(dev, "cannot register network device: %d\n", result);
339 goto error_register_netdev;
340 }
341 i1480u_sysfs_setup(i1480u);
342 if (result < 0)
343 goto error_sysfs_init;
344 return 0;
345
346error_sysfs_init:
347 unregister_netdev(net_dev);
348error_register_netdev:
349 i1480u_rm(i1480u);
350error_i1480u_add:
351 free_netdev(net_dev);
352error_alloc_netdev:
353 return result;
354}
355
356
/**
 * Disconnect a i1480u from the system.
 *
 * i1480u_stop() has been called before, so all the rx and tx contexts
 * have been taken down already. Make sure the queue is stopped,
 * unregister netdev and i1480u, free and kill.
 */
static void i1480u_disconnect(struct usb_interface *iface)
{
	struct i1480u *i1480u;
	struct net_device *net_dev;

	i1480u = usb_get_intfdata(iface);
	net_dev = i1480u->net_dev;
	netif_stop_queue(net_dev);
#ifdef i1480u_FLOW_CONTROL
	usb_kill_urb(i1480u->notif_urb);	/* stop xon/xoff notifications */
#endif
	i1480u_sysfs_release(i1480u);
	unregister_netdev(net_dev);
	i1480u_rm(i1480u);	/* drops RC/USB refs taken at probe time */
	free_netdev(net_dev);
}
380
/* Match the Intel i1480 (0x8086:0x0c3b) by full device info plus the
 * vendor-specific (0xff/0xff/0xff) interface triplet. */
static struct usb_device_id i1480u_id_table[] = {
	{
		.match_flags = USB_DEVICE_ID_MATCH_DEVICE \
			       | USB_DEVICE_ID_MATCH_DEV_INFO \
			       | USB_DEVICE_ID_MATCH_INT_INFO,
		.idVendor = 0x8086,
		.idProduct = 0x0c3b,
		.bDeviceClass = 0xef,
		.bDeviceSubClass = 0x02,
		.bDeviceProtocol = 0x02,
		.bInterfaceClass = 0xff,
		.bInterfaceSubClass = 0xff,
		.bInterfaceProtocol = 0xff,
	},
	{},
};
MODULE_DEVICE_TABLE(usb, i1480u_id_table);
398
/* USB driver glue: probe/disconnect for devices in i1480u_id_table */
static struct usb_driver i1480u_driver = {
	.name = KBUILD_MODNAME,
	.probe = i1480u_probe,
	.disconnect = i1480u_disconnect,
	.id_table = i1480u_id_table,
};
405
/* Module entry point: register the USB driver with the USB core */
static int __init i1480u_driver_init(void)
{
	return usb_register(&i1480u_driver);
}
module_init(i1480u_driver_init);
411
412
/* Module exit point: unregister the USB driver */
static void __exit i1480u_driver_exit(void)
{
	usb_deregister(&i1480u_driver);
}
module_exit(i1480u_driver_exit);
418
419MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
420MODULE_DESCRIPTION("i1480 Wireless UWB Link WLP networking for USB");
421MODULE_LICENSE("GPL");
diff --git a/drivers/uwb/i1480/i1480u-wlp/netdev.c b/drivers/uwb/i1480/i1480u-wlp/netdev.c
new file mode 100644
index 000000000000..8802ac43d872
--- /dev/null
+++ b/drivers/uwb/i1480/i1480u-wlp/netdev.c
@@ -0,0 +1,368 @@
1/*
2 * WUSB Wire Adapter: WLP interface
3 * Driver for the Linux Network stack.
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * FIXME: docs
24 *
25 * Implementation of the netdevice linkage (except tx and rx related stuff).
26 *
27 * ROADMAP:
28 *
29 * ENTRY POINTS (Net device):
30 *
31 * i1480u_open(): Called when we ifconfig up the interface;
32 * associates to a UWB host controller, reserves
33 * bandwidth (MAS), sets up RX USB URB and starts
34 * the queue.
35 *
36 * i1480u_stop(): Called when we ifconfig down a interface;
37 * reverses _open().
38 *
39 * i1480u_set_config():
40 */
41
42#include <linux/if_arp.h>
43#include <linux/etherdevice.h>
44#include <linux/uwb/debug.h>
45#include "i1480u-wlp.h"
46
/* Wire format of the vendor-specific (type 0xfd) radio controller
 * command 0x000e, "SET-IP-MAS": assign the MAS bitmap to be used for
 * IP traffic towards @addr.  Built and sent by i1480u_set_ip_mas(). */
struct i1480u_cmd_set_ip_mas {
	struct uwb_rccb rccb;		/* generic RC command block */
	struct uwb_dev_addr addr;	/* peer device address */
	u8 stream;
	u8 owner;
	u8 type;	/* enum uwb_drp_type */
	u8 baMAS[32];	/* MAS bitmap, 256 bits */
} __attribute__((packed));
55
56
57static
58int i1480u_set_ip_mas(
59 struct uwb_rc *rc,
60 const struct uwb_dev_addr *dstaddr,
61 u8 stream, u8 owner, u8 type, unsigned long *mas)
62{
63
64 int result;
65 struct i1480u_cmd_set_ip_mas *cmd;
66 struct uwb_rc_evt_confirm reply;
67
68 result = -ENOMEM;
69 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
70 if (cmd == NULL)
71 goto error_kzalloc;
72 cmd->rccb.bCommandType = 0xfd;
73 cmd->rccb.wCommand = cpu_to_le16(0x000e);
74 cmd->addr = *dstaddr;
75 cmd->stream = stream;
76 cmd->owner = owner;
77 cmd->type = type;
78 if (mas == NULL)
79 memset(cmd->baMAS, 0x00, sizeof(cmd->baMAS));
80 else
81 memcpy(cmd->baMAS, mas, sizeof(cmd->baMAS));
82 reply.rceb.bEventType = 0xfd;
83 reply.rceb.wEvent = cpu_to_le16(0x000e);
84 result = uwb_rc_cmd(rc, "SET-IP-MAS", &cmd->rccb, sizeof(*cmd),
85 &reply.rceb, sizeof(reply));
86 if (result < 0)
87 goto error_cmd;
88 if (reply.bResultCode != UWB_RC_RES_FAIL) {
89 dev_err(&rc->uwb_dev.dev,
90 "SET-IP-MAS: command execution failed: %d\n",
91 reply.bResultCode);
92 result = -EIO;
93 }
94error_cmd:
95 kfree(cmd);
96error_kzalloc:
97 return result;
98}
99
100/*
101 * Inform a WLP interface of a MAS reservation
102 *
103 * @rc is assumed refcnted.
104 */
105/* FIXME: detect if remote device is WLP capable? */
106static int i1480u_mas_set_dev(struct uwb_dev *uwb_dev, struct uwb_rc *rc,
107 u8 stream, u8 owner, u8 type, unsigned long *mas)
108{
109 int result = 0;
110 struct device *dev = &rc->uwb_dev.dev;
111
112 result = i1480u_set_ip_mas(rc, &uwb_dev->dev_addr, stream, owner,
113 type, mas);
114 if (result < 0) {
115 char rcaddrbuf[UWB_ADDR_STRSIZE], devaddrbuf[UWB_ADDR_STRSIZE];
116 uwb_dev_addr_print(rcaddrbuf, sizeof(rcaddrbuf),
117 &rc->uwb_dev.dev_addr);
118 uwb_dev_addr_print(devaddrbuf, sizeof(devaddrbuf),
119 &uwb_dev->dev_addr);
120 dev_err(dev, "Set IP MAS (%s to %s) failed: %d\n",
121 rcaddrbuf, devaddrbuf, result);
122 }
123 return result;
124}
125
/**
 * Called by bandwidth allocator when change occurs in reservation.
 *
 * @rsv: The reservation that is being established, modified, or
 *       terminated.
 *
 * When a reservation is established, modified, or terminated the upper layer
 * (WLP here) needs set/update the currently available Media Access Slots
 * that can be use for IP traffic.
 *
 * Our action taken during failure depends on how the reservation is being
 * changed:
 * - if reservation is being established we do nothing if we cannot set the
 *   new MAS to be used
 * - if reservation is being terminated we revert back to PCA whether the
 *   SET IP MAS command succeeds or not.
 */
void i1480u_bw_alloc_cb(struct uwb_rsv *rsv)
{
	int result = 0;
	struct i1480u *i1480u = rsv->pal_priv;
	struct device *dev = &i1480u->usb_iface->dev;
	struct uwb_dev *target_dev = rsv->target.dev;
	struct uwb_rc *rc = i1480u->wlp.rc;
	u8 stream = rsv->stream;
	int type = rsv->type;
	int is_owner = rsv->owner == &rc->uwb_dev;
	unsigned long *bmp = rsv->mas.bm;

	/* NOTE(review): informational trace logged at error level —
	 * looks like leftover debugging; consider dev_dbg. */
	dev_err(dev, "WLP callback called - sending set ip mas\n");
	/*user cannot change options while setting configuration*/
	mutex_lock(&i1480u->options.mutex);
	switch (rsv->state) {
	case UWB_RSV_STATE_T_ACCEPTED:
	case UWB_RSV_STATE_O_ESTABLISHED:
		/* Reservation active: switch IP traffic to the DRP stream */
		result = i1480u_mas_set_dev(target_dev, rc, stream, is_owner,
					type, bmp);
		if (result < 0) {
			dev_err(dev, "MAS reservation failed: %d\n", result);
			goto out;
		}
		if (is_owner) {
			wlp_tx_hdr_set_delivery_id_type(&i1480u->options.def_tx_hdr,
							WLP_DRP | stream);
			wlp_tx_hdr_set_rts_cts(&i1480u->options.def_tx_hdr, 0);
		}
		break;
	case UWB_RSV_STATE_NONE:
		/* revert back to PCA */
		result = i1480u_mas_set_dev(target_dev, rc, stream, is_owner,
					type, bmp);
		if (result < 0)
			dev_err(dev, "MAS reservation failed: %d\n", result);
		/* Revert to PCA even though SET IP MAS failed. */
		wlp_tx_hdr_set_delivery_id_type(&i1480u->options.def_tx_hdr,
						i1480u->options.pca_base_priority);
		wlp_tx_hdr_set_rts_cts(&i1480u->options.def_tx_hdr, 1);
		break;
	default:
		dev_err(dev, "unexpected WLP reservation state: %s (%d).\n",
			uwb_rsv_state_str(rsv->state), rsv->state);
		break;
	}
out:
	mutex_unlock(&i1480u->options.mutex);
	return;
}
193
194/**
195 *
196 * Called on 'ifconfig up'
197 */
198int i1480u_open(struct net_device *net_dev)
199{
200 int result;
201 struct i1480u *i1480u = netdev_priv(net_dev);
202 struct wlp *wlp = &i1480u->wlp;
203 struct uwb_rc *rc;
204 struct device *dev = &i1480u->usb_iface->dev;
205
206 rc = wlp->rc;
207 result = i1480u_rx_setup(i1480u); /* Alloc RX stuff */
208 if (result < 0)
209 goto error_rx_setup;
210 netif_wake_queue(net_dev);
211#ifdef i1480u_FLOW_CONTROL
212 result = usb_submit_urb(i1480u->notif_urb, GFP_KERNEL);;
213 if (result < 0) {
214 dev_err(dev, "Can't submit notification URB: %d\n", result);
215 goto error_notif_urb_submit;
216 }
217#endif
218 i1480u->uwb_notifs_handler.cb = i1480u_uwb_notifs_cb;
219 i1480u->uwb_notifs_handler.data = i1480u;
220 if (uwb_bg_joined(rc))
221 netif_carrier_on(net_dev);
222 else
223 netif_carrier_off(net_dev);
224 uwb_notifs_register(rc, &i1480u->uwb_notifs_handler);
225 /* Interface is up with an address, now we can create WSS */
226 result = wlp_wss_setup(net_dev, &wlp->wss);
227 if (result < 0) {
228 dev_err(dev, "Can't create WSS: %d. \n", result);
229 goto error_notif_deregister;
230 }
231 return 0;
232error_notif_deregister:
233 uwb_notifs_deregister(rc, &i1480u->uwb_notifs_handler);
234#ifdef i1480u_FLOW_CONTROL
235error_notif_urb_submit:
236#endif
237 netif_stop_queue(net_dev);
238 i1480u_rx_release(i1480u);
239error_rx_setup:
240 return result;
241}
242
243
/**
 * Called on 'ifconfig down'
 *
 * Reverses i1480u_open(): removes the WSS, detaches from UWB
 * notifications, drops carrier, stops the queue and releases the RX
 * and TX contexts.
 */
int i1480u_stop(struct net_device *net_dev)
{
	struct i1480u *i1480u = netdev_priv(net_dev);
	struct wlp *wlp = &i1480u->wlp;
	struct uwb_rc *rc = wlp->rc;

	BUG_ON(wlp->rc == NULL);
	wlp_wss_remove(&wlp->wss);
	uwb_notifs_deregister(rc, &i1480u->uwb_notifs_handler);
	netif_carrier_off(net_dev);
#ifdef i1480u_FLOW_CONTROL
	usb_kill_urb(i1480u->notif_urb);	/* stop xon/xoff notifications */
#endif
	netif_stop_queue(net_dev);
	i1480u_rx_release(i1480u);
	i1480u_tx_release(i1480u);
	return 0;
}
265
266
267/** Report statistics */
268struct net_device_stats *i1480u_get_stats(struct net_device *net_dev)
269{
270 struct i1480u *i1480u = netdev_priv(net_dev);
271 return &i1480u->stats;
272}
273
274
275/**
276 *
277 * Change the interface config--we probably don't have to do anything.
278 */
279int i1480u_set_config(struct net_device *net_dev, struct ifmap *map)
280{
281 int result;
282 struct i1480u *i1480u = netdev_priv(net_dev);
283 BUG_ON(i1480u->wlp.rc == NULL);
284 result = 0;
285 return result;
286}
287
288/**
289 * Change the MTU of the interface
290 */
291int i1480u_change_mtu(struct net_device *net_dev, int mtu)
292{
293 static union {
294 struct wlp_tx_hdr tx;
295 struct wlp_rx_hdr rx;
296 } i1480u_all_hdrs;
297
298 if (mtu < ETH_HLEN) /* We encap eth frames */
299 return -ERANGE;
300 if (mtu > 4000 - sizeof(i1480u_all_hdrs))
301 return -ERANGE;
302 net_dev->mtu = mtu;
303 return 0;
304}
305
306
307/**
308 * Callback function to handle events from UWB
309 * When we see other devices we know the carrier is ok,
310 * if we are the only device in the beacon group we set the carrier
311 * state to off.
312 * */
313void i1480u_uwb_notifs_cb(void *data, struct uwb_dev *uwb_dev,
314 enum uwb_notifs event)
315{
316 struct i1480u *i1480u = data;
317 struct net_device *net_dev = i1480u->net_dev;
318 struct device *dev = &i1480u->usb_iface->dev;
319 switch (event) {
320 case UWB_NOTIF_BG_JOIN:
321 netif_carrier_on(net_dev);
322 dev_info(dev, "Link is up\n");
323 break;
324 case UWB_NOTIF_BG_LEAVE:
325 netif_carrier_off(net_dev);
326 dev_info(dev, "Link is down\n");
327 break;
328 default:
329 dev_err(dev, "don't know how to handle event %d from uwb\n",
330 event);
331 }
332}
333
334/**
335 * Stop the network queue
336 *
337 * Enable WLP substack to stop network queue. We also set the flow control
338 * threshold at this time to prevent the flow control from restarting the
339 * queue.
340 *
341 * we are loosing the current threshold value here ... FIXME?
342 */
343void i1480u_stop_queue(struct wlp *wlp)
344{
345 struct i1480u *i1480u = container_of(wlp, struct i1480u, wlp);
346 struct net_device *net_dev = i1480u->net_dev;
347 i1480u->tx_inflight.threshold = 0;
348 netif_stop_queue(net_dev);
349}
350
351/**
352 * Start the network queue
353 *
354 * Enable WLP substack to start network queue. Also re-enable the flow
355 * control to manage the queue again.
356 *
357 * We re-enable the flow control by storing the default threshold in the
358 * flow control threshold. This means that if the user modified the
359 * threshold before the queue was stopped and restarted that information
360 * will be lost. FIXME?
361 */
362void i1480u_start_queue(struct wlp *wlp)
363{
364 struct i1480u *i1480u = container_of(wlp, struct i1480u, wlp);
365 struct net_device *net_dev = i1480u->net_dev;
366 i1480u->tx_inflight.threshold = i1480u_TX_INFLIGHT_THRESHOLD;
367 netif_start_queue(net_dev);
368}
diff --git a/drivers/uwb/i1480/i1480u-wlp/rx.c b/drivers/uwb/i1480/i1480u-wlp/rx.c
new file mode 100644
index 000000000000..9fc035354a76
--- /dev/null
+++ b/drivers/uwb/i1480/i1480u-wlp/rx.c
@@ -0,0 +1,486 @@
1/*
2 * WUSB Wire Adapter: WLP interface
3 * Driver for the Linux Network stack.
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * i1480u's RX handling is simple. i1480u will send the received
24 * network packets broken up in fragments; 1 to N fragments make a
25 * packet, we assemble them together and deliver the packet with netif_rx().
26 *
 * Because each USB transfer is a *single* fragment (except when the
28 * transfer contains a first fragment), each URB called thus
29 * back contains one or two fragments. So we queue N URBs, each with its own
30 * fragment buffer. When a URB is done, we process it (adding to the
31 * current skb from the fragment buffer until complete). Once
32 * processed, we requeue the URB. There is always a bunch of URBs
33 * ready to take data, so the intergap should be minimal.
34 *
35 * An URB's transfer buffer is the data field of a socket buffer. This
36 * reduces copying as data can be passed directly to network layer. If a
37 * complete packet or 1st fragment is received the URB's transfer buffer is
38 * taken away from it and used to send data to the network layer. In this
39 * case a new transfer buffer is allocated to the URB before being requeued.
40 * If a "NEXT" or "LAST" fragment is received, the fragment contents is
41 * appended to the RX packet under construction and the transfer buffer
42 * is reused. To be able to use this buffer to assemble complete packets
43 * we set each buffer's size to that of the MAX ethernet packet that can
44 * be received. There is thus room for improvement in memory usage.
45 *
46 * When the max tx fragment size increases, we should be able to read
47 * data into the skbs directly with very simple code.
48 *
49 * ROADMAP:
50 *
51 * ENTRY POINTS:
52 *
53 * i1480u_rx_setup(): setup RX context [from i1480u_open()]
54 *
55 * i1480u_rx_release(): release RX context [from i1480u_stop()]
56 *
57 * i1480u_rx_cb(): called when the RX USB URB receives a
58 * packet. It removes the header and pushes it up
59 * the Linux netdev stack with netif_rx().
60 *
61 * i1480u_rx_buffer()
62 * i1480u_drop() and i1480u_fix()
63 * i1480u_skb_deliver
64 *
65 */
66
67#include <linux/netdevice.h>
68#include <linux/etherdevice.h>
69#include "i1480u-wlp.h"
70
71#define D_LOCAL 0
72#include <linux/uwb/debug.h>
73
74
/**
 * Setup the RX context
 *
 * Each URB is provided with a transfer_buffer that is the data field
 * of a new socket buffer.  All i1480u_RX_BUFS URBs are submitted here;
 * on any failure the partially created resources are torn down via
 * i1480u_rx_release().
 *
 * Returns: 0 on success, < 0 errno on error.
 */
int i1480u_rx_setup(struct i1480u *i1480u)
{
	int result, cnt;
	struct device *dev = &i1480u->usb_iface->dev;
	struct net_device *net_dev = i1480u->net_dev;
	struct usb_endpoint_descriptor *epd;
	struct sk_buff *skb;

	/* Alloc RX stuff */
	i1480u->rx_skb = NULL;	/* not in process of receiving packet */
	result = -ENOMEM;
	epd = &i1480u->usb_iface->cur_altsetting->endpoint[1].desc;
	for (cnt = 0; cnt < i1480u_RX_BUFS; cnt++) {
		struct i1480u_rx_buf *rx_buf = &i1480u->rx_buf[cnt];
		rx_buf->i1480u = i1480u;
		skb = dev_alloc_skb(i1480u_MAX_RX_PKT_SIZE);
		if (!skb) {
			dev_err(dev,
				"RX: cannot allocate RX buffer %d\n", cnt);
			result = -ENOMEM;
			goto error;
		}
		skb->dev = net_dev;
		skb->ip_summed = CHECKSUM_NONE;
		/* 2-byte headroom — presumably NET_IP_ALIGN-style IP header
		 * alignment; TODO confirm */
		skb_reserve(skb, 2);
		rx_buf->data = skb;
		rx_buf->urb = usb_alloc_urb(0, GFP_KERNEL);
		if (unlikely(rx_buf->urb == NULL)) {
			dev_err(dev, "RX: cannot allocate URB %d\n", cnt);
			result = -ENOMEM;
			goto error;
		}
		/* URB reads straight into the skb's data area */
		usb_fill_bulk_urb(rx_buf->urb, i1480u->usb_dev,
			  usb_rcvbulkpipe(i1480u->usb_dev, epd->bEndpointAddress),
			  rx_buf->data->data, i1480u_MAX_RX_PKT_SIZE - 2,
			  i1480u_rx_cb, rx_buf);
		result = usb_submit_urb(rx_buf->urb, GFP_NOIO);
		if (unlikely(result < 0)) {
			dev_err(dev, "RX: cannot submit URB %d: %d\n",
				cnt, result);
			goto error;
		}
	}
	return 0;

error:
	i1480u_rx_release(i1480u);
	return result;
}
130
131
132/** Release resources associated to the rx context */
133void i1480u_rx_release(struct i1480u *i1480u)
134{
135 int cnt;
136 for (cnt = 0; cnt < i1480u_RX_BUFS; cnt++) {
137 if (i1480u->rx_buf[cnt].data)
138 dev_kfree_skb(i1480u->rx_buf[cnt].data);
139 if (i1480u->rx_buf[cnt].urb) {
140 usb_kill_urb(i1480u->rx_buf[cnt].urb);
141 usb_free_urb(i1480u->rx_buf[cnt].urb);
142 }
143 }
144 if (i1480u->rx_skb != NULL)
145 dev_kfree_skb(i1480u->rx_skb);
146}
147
148static
149void i1480u_rx_unlink_urbs(struct i1480u *i1480u)
150{
151 int cnt;
152 for (cnt = 0; cnt < i1480u_RX_BUFS; cnt++) {
153 if (i1480u->rx_buf[cnt].urb)
154 usb_unlink_urb(i1480u->rx_buf[cnt].urb);
155 }
156}
157
/** Fix an out-of-sequence packet: log (ratelimited), drop the
 * half-assembled skb and reset the reassembly state so the current
 * fragment can start a fresh packet.  Uses dev_kfree_skb_irq() as it
 * is invoked from URB completion context. */
#define i1480u_fix(i1480u, msg...)			\
do {							\
	if (printk_ratelimit())				\
		dev_err(&i1480u->usb_iface->dev, msg);	\
	dev_kfree_skb_irq(i1480u->rx_skb);		\
	i1480u->rx_skb = NULL;				\
	i1480u->rx_untd_pkt_size = 0;			\
} while (0)
167
168
/** Drop an out-of-sequence packet: log (ratelimited) and account it in
 * the rx_dropped statistic.  Does not touch the reassembly state. */
#define i1480u_drop(i1480u, msg...)			\
do {							\
	if (printk_ratelimit())				\
		dev_err(&i1480u->usb_iface->dev, msg);	\
	i1480u->stats.rx_dropped++;			\
} while (0)
176
177
178
179
/** Finalizes setting up the SKB and delivers it
 *
 * We first pass the incoming frame to WLP substack for verification. It
 * may also be a WLP association frame in which case WLP will take over the
 * processing. If WLP does not take it over it will still verify it, if the
 * frame is invalid the skb will be freed by WLP and we will not continue
 * parsing.
 */
static
void i1480u_skb_deliver(struct i1480u *i1480u)
{
	int should_parse;
	struct net_device *net_dev = i1480u->net_dev;
	struct device *dev = &i1480u->usb_iface->dev;

	d_printf(6, dev, "RX delivered pre skb(%p), %u bytes\n",
		 i1480u->rx_skb, i1480u->rx_skb->len);
	d_dump(7, dev, i1480u->rx_skb->data, i1480u->rx_skb->len);
	should_parse = wlp_receive_frame(dev, &i1480u->wlp, i1480u->rx_skb,
					 &i1480u->rx_srcaddr);
	if (!should_parse)
		goto out;
	i1480u->rx_skb->protocol = eth_type_trans(i1480u->rx_skb, net_dev);
	d_printf(5, dev, "RX delivered skb(%p), %u bytes\n",
		 i1480u->rx_skb, i1480u->rx_skb->len);
	d_dump(7, dev, i1480u->rx_skb->data,
	       i1480u->rx_skb->len > 72 ? 72 : i1480u->rx_skb->len);
	i1480u->stats.rx_packets++;
	i1480u->stats.rx_bytes += i1480u->rx_untd_pkt_size;
	net_dev->last_rx = jiffies;
	/* FIXME: flow control: check netif_rx() retval */

	netif_rx(i1480u->rx_skb);		/* deliver */
out:
	/* Either WLP freed/consumed the skb or the stack owns it now;
	 * in both cases reassembly state is reset. */
	i1480u->rx_skb = NULL;
	i1480u->rx_untd_pkt_size = 0;
}
217
218
/**
 * Process a buffer of data received from the USB RX endpoint
 *
 * First fragment arrives with next or last fragment. All other fragments
 * arrive alone.
 *
 * Walks the transfer buffer fragment by fragment: 1ST and CMP fragments
 * take over the URB's skb (rx_buf->data is NULLed so the completion
 * handler allocates a replacement); NXT and LST fragments are copied
 * into the packet being reassembled in i1480u->rx_skb.  A completed
 * packet is handed to i1480u_skb_deliver().
 *
 * /me hates long functions.
 */
static
void i1480u_rx_buffer(struct i1480u_rx_buf *rx_buf)
{
	unsigned pkt_completed = 0;	/* !0 when we got all pkt fragments */
	size_t untd_hdr_size, untd_frg_size;
	size_t i1480u_hdr_size;
	struct wlp_rx_hdr *i1480u_hdr = NULL;

	struct i1480u *i1480u = rx_buf->i1480u;
	struct sk_buff *skb = rx_buf->data;
	int size_left = rx_buf->urb->actual_length;
	void *ptr = rx_buf->urb->transfer_buffer; /* also rx_buf->data->data */
	struct untd_hdr *untd_hdr;

	struct net_device *net_dev = i1480u->net_dev;
	struct device *dev = &i1480u->usb_iface->dev;
	struct sk_buff *new_skb;

#if 0
	dev_fnstart(dev,
		    "(i1480u %p ptr %p size_left %zu)\n", i1480u, ptr, size_left);
	dev_err(dev, "RX packet, %zu bytes\n", size_left);
	dump_bytes(dev, ptr, size_left);
#endif
	i1480u_hdr_size = sizeof(struct wlp_rx_hdr);

	while (size_left > 0) {
		if (pkt_completed) {
			i1480u_drop(i1480u, "RX: fragment follows completed"
				    "packet in same buffer. Dropping\n");
			break;
		}
		untd_hdr = ptr;
		if (size_left < sizeof(*untd_hdr)) {	/* Check the UNTD header */
			i1480u_drop(i1480u, "RX: short UNTD header! Dropping\n");
			goto out;
		}
		if (unlikely(untd_hdr_rx_tx(untd_hdr) == 0)) {	/* Paranoia: TX set? */
			i1480u_drop(i1480u, "RX: TX bit set! Dropping\n");
			goto out;
		}
		switch (untd_hdr_type(untd_hdr)) {	/* Check the UNTD header type */
		case i1480u_PKT_FRAG_1ST: {
			/* First fragment: start reassembly in this skb */
			struct untd_hdr_1st *untd_hdr_1st = (void *) untd_hdr;
			dev_dbg(dev, "1st fragment\n");
			untd_hdr_size = sizeof(struct untd_hdr_1st);
			if (i1480u->rx_skb != NULL)
				i1480u_fix(i1480u, "RX: 1st fragment out of "
					   "sequence! Fixing\n");
			if (size_left < untd_hdr_size + i1480u_hdr_size) {
				i1480u_drop(i1480u, "RX: short 1st fragment! "
					    "Dropping\n");
				goto out;
			}
			i1480u->rx_untd_pkt_size = le16_to_cpu(untd_hdr->len)
						 - i1480u_hdr_size;
			untd_frg_size = le16_to_cpu(untd_hdr_1st->fragment_len);
			if (size_left < untd_hdr_size + untd_frg_size) {
				i1480u_drop(i1480u,
					    "RX: short payload! Dropping\n");
				goto out;
			}
			i1480u->rx_skb = skb;
			i1480u_hdr = (void *) untd_hdr_1st + untd_hdr_size;
			i1480u->rx_srcaddr = i1480u_hdr->srcaddr;
			skb_put(i1480u->rx_skb, untd_hdr_size + untd_frg_size);
			skb_pull(i1480u->rx_skb, untd_hdr_size + i1480u_hdr_size);
			stats_add_sample(&i1480u->lqe_stats, (s8) i1480u_hdr->LQI - 7);
			stats_add_sample(&i1480u->rssi_stats, i1480u_hdr->RSSI + 18);
			rx_buf->data = NULL; /* need to create new buffer */
			break;
		}
		case i1480u_PKT_FRAG_NXT: {
			/* Middle fragment: append payload to rx_skb */
			dev_dbg(dev, "nxt fragment\n");
			untd_hdr_size = sizeof(struct untd_hdr_rst);
			if (i1480u->rx_skb == NULL) {
				i1480u_drop(i1480u, "RX: next fragment out of "
					    "sequence! Dropping\n");
				goto out;
			}
			if (size_left < untd_hdr_size) {
				i1480u_drop(i1480u, "RX: short NXT fragment! "
					    "Dropping\n");
				goto out;
			}
			untd_frg_size = le16_to_cpu(untd_hdr->len);
			if (size_left < untd_hdr_size + untd_frg_size) {
				i1480u_drop(i1480u,
					    "RX: short payload! Dropping\n");
				goto out;
			}
			memmove(skb_put(i1480u->rx_skb, untd_frg_size),
				ptr + untd_hdr_size, untd_frg_size);
			break;
		}
		case i1480u_PKT_FRAG_LST: {
			/* Last fragment: append payload, mark complete */
			dev_dbg(dev, "Lst fragment\n");
			untd_hdr_size = sizeof(struct untd_hdr_rst);
			if (i1480u->rx_skb == NULL) {
				i1480u_drop(i1480u, "RX: last fragment out of "
					    "sequence! Dropping\n");
				goto out;
			}
			if (size_left < untd_hdr_size) {
				i1480u_drop(i1480u, "RX: short LST fragment! "
					    "Dropping\n");
				goto out;
			}
			untd_frg_size = le16_to_cpu(untd_hdr->len);
			if (size_left < untd_frg_size + untd_hdr_size) {
				i1480u_drop(i1480u,
					    "RX: short payload! Dropping\n");
				goto out;
			}
			memmove(skb_put(i1480u->rx_skb, untd_frg_size),
				ptr + untd_hdr_size, untd_frg_size);
			pkt_completed = 1;
			break;
		}
		case i1480u_PKT_FRAG_CMP: {
			/* Complete packet in a single fragment */
			dev_dbg(dev, "cmp fragment\n");
			untd_hdr_size = sizeof(struct untd_hdr_cmp);
			if (i1480u->rx_skb != NULL)
				i1480u_fix(i1480u, "RX: fix out-of-sequence CMP"
					   " fragment!\n");
			if (size_left < untd_hdr_size + i1480u_hdr_size) {
				i1480u_drop(i1480u, "RX: short CMP fragment! "
					    "Dropping\n");
				goto out;
			}
			i1480u->rx_untd_pkt_size = le16_to_cpu(untd_hdr->len);
			untd_frg_size = i1480u->rx_untd_pkt_size;
			if (size_left < i1480u->rx_untd_pkt_size + untd_hdr_size) {
				i1480u_drop(i1480u,
					    "RX: short payload! Dropping\n");
				goto out;
			}
			i1480u->rx_skb = skb;
			i1480u_hdr = (void *) untd_hdr + untd_hdr_size;
			i1480u->rx_srcaddr = i1480u_hdr->srcaddr;
			stats_add_sample(&i1480u->lqe_stats, (s8) i1480u_hdr->LQI - 7);
			stats_add_sample(&i1480u->rssi_stats, i1480u_hdr->RSSI + 18);
			skb_put(i1480u->rx_skb, untd_hdr_size + i1480u->rx_untd_pkt_size);
			skb_pull(i1480u->rx_skb, untd_hdr_size + i1480u_hdr_size);
			rx_buf->data = NULL; /* for hand off skb to network stack */
			pkt_completed = 1;
			i1480u->rx_untd_pkt_size -= i1480u_hdr_size; /* accurate stat */
			break;
		}
		default:
			i1480u_drop(i1480u, "RX: unknown packet type %u! "
				    "Dropping\n", untd_hdr_type(untd_hdr));
			goto out;
		}
		size_left -= untd_hdr_size + untd_frg_size;
		if (size_left > 0)
			ptr += untd_hdr_size + untd_frg_size;
	}
	if (pkt_completed)
		i1480u_skb_deliver(i1480u);
out:
	/* recreate needed RX buffers*/
	if (rx_buf->data == NULL) {
		/* buffer is being used to receive packet, create new */
		new_skb = dev_alloc_skb(i1480u_MAX_RX_PKT_SIZE);
		if (!new_skb) {
			if (printk_ratelimit())
				dev_err(dev,
					"RX: cannot allocate RX buffer\n");
		} else {
			new_skb->dev = net_dev;
			new_skb->ip_summed = CHECKSUM_NONE;
			skb_reserve(new_skb, 2);
			rx_buf->data = new_skb;
		}
	}
	return;
}
405
406
407/**
408 * Called when an RX URB has finished receiving or has found some kind
409 * of error condition.
410 *
411 * LIMITATIONS:
412 *
413 * - We read USB-transfers, each transfer contains a SINGLE fragment
414 * (can contain a complete packet, or a 1st, next, or last fragment
415 * of a packet).
416 * Looks like a transfer can contain more than one fragment (07/18/06)
417 *
418 * - Each transfer buffer is the size of the maximum packet size (minus
419 * headroom), i1480u_MAX_PKT_SIZE - 2
420 *
421 * - We always read the full USB-transfer, no partials.
422 *
423 * - Each transfer is read directly into a skb. This skb will be used to
424 * send data to the upper layers if it is the first fragment or a complete
425 * packet. In the other cases the data will be copied from the skb to
426 * another skb that is being prepared for the upper layers from a prev
427 * first fragment.
428 *
429 * It is simply too much of a pain. Gosh, there should be a unified
430 * SG infrastructure for *everything* [so that I could declare a SG
431 * buffer, pass it to USB for receiving, append some space to it if
432 * I wish, receive more until I have the whole chunk, adapt
433 * pointers on each fragment to remove hardware headers and then
434 * attach that to an skbuff and netif_rx()].
435 */
436void i1480u_rx_cb(struct urb *urb)
437{
438 int result;
439 int do_parse_buffer = 1;
440 struct i1480u_rx_buf *rx_buf = urb->context;
441 struct i1480u *i1480u = rx_buf->i1480u;
442 struct device *dev = &i1480u->usb_iface->dev;
443 unsigned long flags;
444 u8 rx_buf_idx = rx_buf - i1480u->rx_buf;
445
446 switch (urb->status) {
447 case 0:
448 break;
449 case -ECONNRESET: /* Not an error, but a controlled situation; */
450 case -ENOENT: /* (we killed the URB)...so, no broadcast */
451 case -ESHUTDOWN: /* going away! */
452 dev_err(dev, "RX URB[%u]: goind down %d\n",
453 rx_buf_idx, urb->status);
454 goto error;
455 default:
456 dev_err(dev, "RX URB[%u]: unknown status %d\n",
457 rx_buf_idx, urb->status);
458 if (edc_inc(&i1480u->rx_errors, EDC_MAX_ERRORS,
459 EDC_ERROR_TIMEFRAME)) {
460 dev_err(dev, "RX: max acceptable errors exceeded,"
461 " resetting device.\n");
462 i1480u_rx_unlink_urbs(i1480u);
463 wlp_reset_all(&i1480u->wlp);
464 goto error;
465 }
466 do_parse_buffer = 0;
467 break;
468 }
469 spin_lock_irqsave(&i1480u->lock, flags);
470 /* chew the data fragments, extract network packets */
471 if (do_parse_buffer) {
472 i1480u_rx_buffer(rx_buf);
473 if (rx_buf->data) {
474 rx_buf->urb->transfer_buffer = rx_buf->data->data;
475 result = usb_submit_urb(rx_buf->urb, GFP_ATOMIC);
476 if (result < 0) {
477 dev_err(dev, "RX URB[%u]: cannot submit %d\n",
478 rx_buf_idx, result);
479 }
480 }
481 }
482 spin_unlock_irqrestore(&i1480u->lock, flags);
483error:
484 return;
485}
486
diff --git a/drivers/uwb/i1480/i1480u-wlp/sysfs.c b/drivers/uwb/i1480/i1480u-wlp/sysfs.c
new file mode 100644
index 000000000000..a1d8ca6ac935
--- /dev/null
+++ b/drivers/uwb/i1480/i1480u-wlp/sysfs.c
@@ -0,0 +1,408 @@
1/*
2 * WUSB Wire Adapter: WLP interface
3 * Sysfs interfaces
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * FIXME: docs
24 */
25
26#include <linux/netdevice.h>
27#include <linux/etherdevice.h>
28#include <linux/uwb/debug.h>
29#include <linux/device.h>
30#include "i1480u-wlp.h"
31
32
33/**
34 *
35 * @dev: Class device from the net_device; assumed refcnted.
36 *
37 * Yes, I don't lock--we assume it is refcounted and I am getting a
38 * single byte value that is kind of atomic to read.
39 */
40ssize_t uwb_phy_rate_show(const struct wlp_options *options, char *buf)
41{
42 return sprintf(buf, "%u\n",
43 wlp_tx_hdr_phy_rate(&options->def_tx_hdr));
44}
45EXPORT_SYMBOL_GPL(uwb_phy_rate_show);
46
47
48ssize_t uwb_phy_rate_store(struct wlp_options *options,
49 const char *buf, size_t size)
50{
51 ssize_t result;
52 unsigned rate;
53
54 result = sscanf(buf, "%u\n", &rate);
55 if (result != 1) {
56 result = -EINVAL;
57 goto out;
58 }
59 result = -EINVAL;
60 if (rate >= UWB_PHY_RATE_INVALID)
61 goto out;
62 wlp_tx_hdr_set_phy_rate(&options->def_tx_hdr, rate);
63 result = 0;
64out:
65 return result < 0 ? result : size;
66}
67EXPORT_SYMBOL_GPL(uwb_phy_rate_store);
68
69
70ssize_t uwb_rts_cts_show(const struct wlp_options *options, char *buf)
71{
72 return sprintf(buf, "%u\n",
73 wlp_tx_hdr_rts_cts(&options->def_tx_hdr));
74}
75EXPORT_SYMBOL_GPL(uwb_rts_cts_show);
76
77
78ssize_t uwb_rts_cts_store(struct wlp_options *options,
79 const char *buf, size_t size)
80{
81 ssize_t result;
82 unsigned value;
83
84 result = sscanf(buf, "%u\n", &value);
85 if (result != 1) {
86 result = -EINVAL;
87 goto out;
88 }
89 result = -EINVAL;
90 wlp_tx_hdr_set_rts_cts(&options->def_tx_hdr, !!value);
91 result = 0;
92out:
93 return result < 0 ? result : size;
94}
95EXPORT_SYMBOL_GPL(uwb_rts_cts_store);
96
97
98ssize_t uwb_ack_policy_show(const struct wlp_options *options, char *buf)
99{
100 return sprintf(buf, "%u\n",
101 wlp_tx_hdr_ack_policy(&options->def_tx_hdr));
102}
103EXPORT_SYMBOL_GPL(uwb_ack_policy_show);
104
105
106ssize_t uwb_ack_policy_store(struct wlp_options *options,
107 const char *buf, size_t size)
108{
109 ssize_t result;
110 unsigned value;
111
112 result = sscanf(buf, "%u\n", &value);
113 if (result != 1 || value > UWB_ACK_B_REQ) {
114 result = -EINVAL;
115 goto out;
116 }
117 wlp_tx_hdr_set_ack_policy(&options->def_tx_hdr, value);
118 result = 0;
119out:
120 return result < 0 ? result : size;
121}
122EXPORT_SYMBOL_GPL(uwb_ack_policy_store);
123
124
125/**
126 * Show the PCA base priority.
127 *
128 * We can access without locking, as the value is (for now) orthogonal
129 * to other values.
130 */
131ssize_t uwb_pca_base_priority_show(const struct wlp_options *options,
132 char *buf)
133{
134 return sprintf(buf, "%u\n",
135 options->pca_base_priority);
136}
137EXPORT_SYMBOL_GPL(uwb_pca_base_priority_show);
138
139
140/**
141 * Set the PCA base priority.
142 *
143 * We can access without locking, as the value is (for now) orthogonal
144 * to other values.
145 */
146ssize_t uwb_pca_base_priority_store(struct wlp_options *options,
147 const char *buf, size_t size)
148{
149 ssize_t result = -EINVAL;
150 u8 pca_base_priority;
151
152 result = sscanf(buf, "%hhu\n", &pca_base_priority);
153 if (result != 1) {
154 result = -EINVAL;
155 goto out;
156 }
157 result = -EINVAL;
158 if (pca_base_priority >= 8)
159 goto out;
160 options->pca_base_priority = pca_base_priority;
161 /* Update TX header if we are currently using PCA. */
162 if (result >= 0 && (wlp_tx_hdr_delivery_id_type(&options->def_tx_hdr) & WLP_DRP) == 0)
163 wlp_tx_hdr_set_delivery_id_type(&options->def_tx_hdr, options->pca_base_priority);
164 result = 0;
165out:
166 return result < 0 ? result : size;
167}
168EXPORT_SYMBOL_GPL(uwb_pca_base_priority_store);
169
/**
 * Show current inflight values
 *
 * Will print the current MAX and THRESHOLD values for the basic flow
 * control. In addition it will report how many times the TX queue needed
 * to be restarted since the last time this query was made.
 *
 * NOTE: reading has a side effect — it resets the restart counter and
 * timestamp, so the restarts/sec figure is relative to the previous
 * read.  The read-then-reset is not atomic; a restart occurring in
 * between may be lost from the statistics (tolerated, stats only).
 */
static ssize_t wlp_tx_inflight_show(struct i1480u_tx_inflight *inflight,
				    char *buf)
{
	ssize_t result;
	/* Seconds since the counters were last reset (jiffies-based). */
	unsigned long sec_elapsed = (jiffies - inflight->restart_ts)/HZ;
	unsigned long restart_count = atomic_read(&inflight->restart_count);

	result = scnprintf(buf, PAGE_SIZE, "%lu %lu %d %lu %lu %lu\n"
			   "#read: threshold max inflight_count restarts "
			   "seconds restarts/sec\n"
			   "#write: threshold max\n",
			   inflight->threshold, inflight->max,
			   atomic_read(&inflight->count),
			   restart_count, sec_elapsed,
			   sec_elapsed == 0 ? 0 : restart_count/sec_elapsed);
	/* Reset the window so the next read reports a fresh rate. */
	inflight->restart_ts = jiffies;
	atomic_set(&inflight->restart_count, 0);
	return result;
}
196
197static
198ssize_t wlp_tx_inflight_store(struct i1480u_tx_inflight *inflight,
199 const char *buf, size_t size)
200{
201 unsigned long in_threshold, in_max;
202 ssize_t result;
203 result = sscanf(buf, "%lu %lu", &in_threshold, &in_max);
204 if (result != 2)
205 return -EINVAL;
206 if (in_max <= in_threshold)
207 return -EINVAL;
208 inflight->max = in_max;
209 inflight->threshold = in_threshold;
210 return size;
211}
212/*
 * Glue (or function adaptors) for accessing info on sysfs
214 *
215 * [we need this indirection because the PCI driver does almost the
216 * same]
217 *
218 * Linux 2.6.21 changed how 'struct netdevice' does attributes (from
219 * having a 'struct class_dev' to having a 'struct device'). That is
220 * quite of a pain.
221 *
222 * So we try to abstract that here. i1480u_SHOW() and i1480u_STORE()
223 * create adaptors for extracting the 'struct i1480u' from a 'struct
224 * dev' and calling a function for doing a sysfs operation (as we have
225 * them factorized already). i1480u_ATTR creates the attribute file
226 * (CLASS_DEVICE_ATTR or DEVICE_ATTR) and i1480u_ATTR_NAME produces a
227 * class_device_attr_NAME or device_attr_NAME (for group registration).
228 */
#include <linux/version.h>

/*
 * i1480u_SHOW(name, fn, param): generate a sysfs show() adaptor that
 * extracts the struct i1480u from the net_device behind @dev and
 * forwards to @fn(&i1480u->param, buf).
 */
#define i1480u_SHOW(name, fn, param)					\
static ssize_t i1480u_show_##name(struct device *dev,			\
				  struct device_attribute *attr,	\
				  char *buf)				\
{									\
	struct i1480u *i1480u = netdev_priv(to_net_dev(dev));		\
	return fn(&i1480u->param, buf);					\
}

/*
 * i1480u_STORE(name, fn, param): same as i1480u_SHOW() but for the
 * store() side of the attribute.
 */
#define i1480u_STORE(name, fn, param)					\
static ssize_t i1480u_store_##name(struct device *dev,			\
				   struct device_attribute *attr,	\
				   const char *buf, size_t size)	\
{									\
	struct i1480u *i1480u = netdev_priv(to_net_dev(dev));		\
	return fn(&i1480u->param, buf, size);				\
}

/* Declare a read/write attribute backed by the generated adaptors. */
#define i1480u_ATTR(name, perm) static DEVICE_ATTR(name, perm,		\
						   i1480u_show_##name,	\
						   i1480u_store_##name)

/* Declare a read-only attribute (no store adaptor). */
#define i1480u_ATTR_SHOW(name) static DEVICE_ATTR(name,			\
						  S_IRUGO,		\
						  i1480u_show_##name, NULL)

/* Name of the generated device_attribute, for group registration. */
#define i1480u_ATTR_NAME(a) (dev_attr_##a)
258
259
/*
 * Sysfs adaptors
 *
 * Each trio below generates the show/store adaptors and the
 * device_attribute for one sysfs file, forwarding to the factored-out
 * handlers above (or to the WLP substack's handlers for the wlp_*
 * attributes).
 */
i1480u_SHOW(uwb_phy_rate, uwb_phy_rate_show, options);
i1480u_STORE(uwb_phy_rate, uwb_phy_rate_store, options);
i1480u_ATTR(uwb_phy_rate, S_IRUGO | S_IWUSR);

i1480u_SHOW(uwb_rts_cts, uwb_rts_cts_show, options);
i1480u_STORE(uwb_rts_cts, uwb_rts_cts_store, options);
i1480u_ATTR(uwb_rts_cts, S_IRUGO | S_IWUSR);

i1480u_SHOW(uwb_ack_policy, uwb_ack_policy_show, options);
i1480u_STORE(uwb_ack_policy, uwb_ack_policy_store, options);
i1480u_ATTR(uwb_ack_policy, S_IRUGO | S_IWUSR);

i1480u_SHOW(uwb_pca_base_priority, uwb_pca_base_priority_show, options);
i1480u_STORE(uwb_pca_base_priority, uwb_pca_base_priority_store, options);
i1480u_ATTR(uwb_pca_base_priority, S_IRUGO | S_IWUSR);

i1480u_SHOW(wlp_eda, wlp_eda_show, wlp);
i1480u_STORE(wlp_eda, wlp_eda_store, wlp);
i1480u_ATTR(wlp_eda, S_IRUGO | S_IWUSR);

i1480u_SHOW(wlp_uuid, wlp_uuid_show, wlp);
i1480u_STORE(wlp_uuid, wlp_uuid_store, wlp);
i1480u_ATTR(wlp_uuid, S_IRUGO | S_IWUSR);

i1480u_SHOW(wlp_dev_name, wlp_dev_name_show, wlp);
i1480u_STORE(wlp_dev_name, wlp_dev_name_store, wlp);
i1480u_ATTR(wlp_dev_name, S_IRUGO | S_IWUSR);

i1480u_SHOW(wlp_dev_manufacturer, wlp_dev_manufacturer_show, wlp);
i1480u_STORE(wlp_dev_manufacturer, wlp_dev_manufacturer_store, wlp);
i1480u_ATTR(wlp_dev_manufacturer, S_IRUGO | S_IWUSR);

i1480u_SHOW(wlp_dev_model_name, wlp_dev_model_name_show, wlp);
i1480u_STORE(wlp_dev_model_name, wlp_dev_model_name_store, wlp);
i1480u_ATTR(wlp_dev_model_name, S_IRUGO | S_IWUSR);

i1480u_SHOW(wlp_dev_model_nr, wlp_dev_model_nr_show, wlp);
i1480u_STORE(wlp_dev_model_nr, wlp_dev_model_nr_store, wlp);
i1480u_ATTR(wlp_dev_model_nr, S_IRUGO | S_IWUSR);

i1480u_SHOW(wlp_dev_serial, wlp_dev_serial_show, wlp);
i1480u_STORE(wlp_dev_serial, wlp_dev_serial_store, wlp);
i1480u_ATTR(wlp_dev_serial, S_IRUGO | S_IWUSR);

i1480u_SHOW(wlp_dev_prim_category, wlp_dev_prim_category_show, wlp);
i1480u_STORE(wlp_dev_prim_category, wlp_dev_prim_category_store, wlp);
i1480u_ATTR(wlp_dev_prim_category, S_IRUGO | S_IWUSR);

i1480u_SHOW(wlp_dev_prim_OUI, wlp_dev_prim_OUI_show, wlp);
i1480u_STORE(wlp_dev_prim_OUI, wlp_dev_prim_OUI_store, wlp);
i1480u_ATTR(wlp_dev_prim_OUI, S_IRUGO | S_IWUSR);

i1480u_SHOW(wlp_dev_prim_OUI_sub, wlp_dev_prim_OUI_sub_show, wlp);
i1480u_STORE(wlp_dev_prim_OUI_sub, wlp_dev_prim_OUI_sub_store, wlp);
i1480u_ATTR(wlp_dev_prim_OUI_sub, S_IRUGO | S_IWUSR);

i1480u_SHOW(wlp_dev_prim_subcat, wlp_dev_prim_subcat_show, wlp);
i1480u_STORE(wlp_dev_prim_subcat, wlp_dev_prim_subcat_store, wlp);
i1480u_ATTR(wlp_dev_prim_subcat, S_IRUGO | S_IWUSR);

/* Read-only: the neighborhood cannot be written from user space. */
i1480u_SHOW(wlp_neighborhood, wlp_neighborhood_show, wlp);
i1480u_ATTR_SHOW(wlp_neighborhood);

i1480u_SHOW(wss_activate, wlp_wss_activate_show, wlp.wss);
i1480u_STORE(wss_activate, wlp_wss_activate_store, wlp.wss);
i1480u_ATTR(wss_activate, S_IRUGO | S_IWUSR);

/*
 * Show the (min, max, avg) Line Quality Estimate (LQE, in dB) as over
 * the last 256 received WLP frames (ECMA-368 13.3).
 *
 * [the -7dB that have to be subtracted from the LQI to make the LQE
 * are already taken into account].
 */
i1480u_SHOW(wlp_lqe, stats_show, lqe_stats);
i1480u_STORE(wlp_lqe, stats_store, lqe_stats);
i1480u_ATTR(wlp_lqe, S_IRUGO | S_IWUSR);

/*
 * Show the Receive Signal Strength Indicator averaged over all the
 * received WLP frames (ECMA-368 13.3). Still is not clear what
 * this value is, but is kind of a percentage of the signal strength
 * at the antenna.
 */
i1480u_SHOW(wlp_rssi, stats_show, rssi_stats);
i1480u_STORE(wlp_rssi, stats_store, rssi_stats);
i1480u_ATTR(wlp_rssi, S_IRUGO | S_IWUSR);

/**
 * We maintain a basic flow control counter. "count" how many TX URBs are
 * outstanding. Only allow "max"
 * TX URBs to be outstanding. If this value is reached the queue will be
 * stopped. The queue will be restarted when there are
 * "threshold" URBs outstanding.
 */
i1480u_SHOW(wlp_tx_inflight, wlp_tx_inflight_show, tx_inflight);
i1480u_STORE(wlp_tx_inflight, wlp_tx_inflight_store, tx_inflight);
i1480u_ATTR(wlp_tx_inflight, S_IRUGO | S_IWUSR);
361
/* All attributes registered for the i1480u net_device, in one group. */
static struct attribute *i1480u_attrs[] = {
	&i1480u_ATTR_NAME(uwb_phy_rate).attr,
	&i1480u_ATTR_NAME(uwb_rts_cts).attr,
	&i1480u_ATTR_NAME(uwb_ack_policy).attr,
	&i1480u_ATTR_NAME(uwb_pca_base_priority).attr,
	&i1480u_ATTR_NAME(wlp_lqe).attr,
	&i1480u_ATTR_NAME(wlp_rssi).attr,
	&i1480u_ATTR_NAME(wlp_eda).attr,
	&i1480u_ATTR_NAME(wlp_uuid).attr,
	&i1480u_ATTR_NAME(wlp_dev_name).attr,
	&i1480u_ATTR_NAME(wlp_dev_manufacturer).attr,
	&i1480u_ATTR_NAME(wlp_dev_model_name).attr,
	&i1480u_ATTR_NAME(wlp_dev_model_nr).attr,
	&i1480u_ATTR_NAME(wlp_dev_serial).attr,
	&i1480u_ATTR_NAME(wlp_dev_prim_category).attr,
	&i1480u_ATTR_NAME(wlp_dev_prim_OUI).attr,
	&i1480u_ATTR_NAME(wlp_dev_prim_OUI_sub).attr,
	&i1480u_ATTR_NAME(wlp_dev_prim_subcat).attr,
	&i1480u_ATTR_NAME(wlp_neighborhood).attr,
	&i1480u_ATTR_NAME(wss_activate).attr,
	&i1480u_ATTR_NAME(wlp_tx_inflight).attr,
	NULL,
};

static struct attribute_group i1480u_attr_group = {
	.name = NULL,	/* we want them in the same directory */
	.attrs = i1480u_attrs,
};
390
391int i1480u_sysfs_setup(struct i1480u *i1480u)
392{
393 int result;
394 struct device *dev = &i1480u->usb_iface->dev;
395 result = sysfs_create_group(&i1480u->net_dev->dev.kobj,
396 &i1480u_attr_group);
397 if (result < 0)
398 dev_err(dev, "cannot initialize sysfs attributes: %d\n",
399 result);
400 return result;
401}
402
403
404void i1480u_sysfs_release(struct i1480u *i1480u)
405{
406 sysfs_remove_group(&i1480u->net_dev->dev.kobj,
407 &i1480u_attr_group);
408}
diff --git a/drivers/uwb/i1480/i1480u-wlp/tx.c b/drivers/uwb/i1480/i1480u-wlp/tx.c
new file mode 100644
index 000000000000..3426bfb68240
--- /dev/null
+++ b/drivers/uwb/i1480/i1480u-wlp/tx.c
@@ -0,0 +1,632 @@
1/*
2 * WUSB Wire Adapter: WLP interface
3 * Deal with TX (massaging data to transmit, handling it)
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * Transmission engine. Get an skb, create from that a WLP transmit
24 * context, add a WLP TX header (which we keep prefilled in the
25 * device's instance), fill out the target-specific fields and
26 * fire it.
27 *
28 * ROADMAP:
29 *
30 * Entry points:
31 *
32 * i1480u_tx_release(): called by i1480u_disconnect() to release
33 * pending tx contexts.
34 *
35 * i1480u_tx_cb(): callback for TX contexts (USB URBs)
36 * i1480u_tx_destroy():
37 *
38 * i1480u_tx_timeout(): called for timeout handling from the
39 * network stack.
40 *
41 * i1480u_hard_start_xmit(): called for transmitting an skb from
42 * the network stack. Will interact with WLP
43 * substack to verify and prepare frame.
44 * i1480u_xmit_frame(): actual transmission on hardware
45 *
46 * i1480u_tx_create() Creates TX context
47 * i1480u_tx_create_1() For packets in 1 fragment
48 * i1480u_tx_create_n() For packets in >1 fragments
49 *
50 * TODO:
51 *
52 * - FIXME: rewrite using usb_sg_*(), add asynch support to
53 * usb_sg_*(). It might not make too much sense as most of
54 * the times the MTU will be smaller than one page...
55 */
56
57#include "i1480u-wlp.h"
58#define D_LOCAL 5
59#include <linux/uwb/debug.h>
60
enum {
	/* Max payload per Next/Last fragment: those carry only the
	 * smaller untd_hdr_rst header (the first fragment additionally
	 * carries the WLP TX header). */
	i1480u_MAX_PL_SIZE = i1480u_MAX_FRG_SIZE
		- sizeof(struct untd_hdr_rst),
};
66
/**
 * Free resources allocated to a i1480u tx context.
 *
 * Releases the fragment buffer (NULL for single-fragment contexts),
 * the skb if still owned by the context, the URB and the context
 * itself.  dev_kfree_skb_irq() is used because this may run from the
 * URB completion path (interrupt context).
 */
static
void i1480u_tx_free(struct i1480u_tx *wtx)
{
	kfree(wtx->buf);
	if (wtx->skb)
		dev_kfree_skb_irq(wtx->skb);
	usb_free_urb(wtx->urb);
	kfree(wtx);
}
77
/*
 * Unlink a TX context from the device's tx_list and free it.
 *
 * The list removal and the free happen under tx_list_lock so the
 * context cannot be found (e.g. by i1480u_tx_unlink_urbs()) while it
 * is being torn down.
 */
static
void i1480u_tx_destroy(struct i1480u *i1480u, struct i1480u_tx *wtx)
{
	unsigned long flags;
	spin_lock_irqsave(&i1480u->tx_list_lock, flags);	/* not active any more */
	list_del(&wtx->list_node);
	i1480u_tx_free(wtx);
	spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
}
87
88static
89void i1480u_tx_unlink_urbs(struct i1480u *i1480u)
90{
91 unsigned long flags;
92 struct i1480u_tx *wtx, *next;
93
94 spin_lock_irqsave(&i1480u->tx_list_lock, flags);
95 list_for_each_entry_safe(wtx, next, &i1480u->tx_list, list_node) {
96 usb_unlink_urb(wtx->urb);
97 }
98 spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
99}
100
101
/**
 * Callback for a completed tx USB URB.
 *
 * On success, account the TX in the device stats; on controlled
 * shutdown, stop the queue; on repeated unknown errors (EDC window
 * exceeded), stop the queue, unlink all TX URBs and reset the device.
 * In every case the TX context is destroyed and, if the in-flight
 * count has dropped to the flow-control threshold while the queue was
 * stopped, the queue is restarted.
 *
 * TODO:
 *
 * - FIXME: recover errors more gracefully
 * - FIXME: handle NAKs (I dont think they come here) for flow ctl
 */
static
void i1480u_tx_cb(struct urb *urb)
{
	struct i1480u_tx *wtx = urb->context;
	struct i1480u *i1480u = wtx->i1480u;
	struct net_device *net_dev = i1480u->net_dev;
	struct device *dev = &i1480u->usb_iface->dev;
	unsigned long flags;

	switch (urb->status) {
	case 0:
		/* Stats are shared with the RX path; take the device lock. */
		spin_lock_irqsave(&i1480u->lock, flags);
		i1480u->stats.tx_packets++;
		i1480u->stats.tx_bytes += urb->actual_length;
		spin_unlock_irqrestore(&i1480u->lock, flags);
		break;
	case -ECONNRESET:	/* Not an error, but a controlled situation; */
	case -ENOENT:		/* (we killed the URB)...so, no broadcast */
		dev_dbg(dev, "notif endp: reset/noent %d\n", urb->status);
		netif_stop_queue(net_dev);
		break;
	case -ESHUTDOWN:	/* going away! */
		dev_dbg(dev, "notif endp: down %d\n", urb->status);
		netif_stop_queue(net_dev);
		break;
	default:
		dev_err(dev, "TX: unknown URB status %d\n", urb->status);
		if (edc_inc(&i1480u->tx_errors, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "TX: max acceptable errors exceeded."
				"Reset device.\n");
			netif_stop_queue(net_dev);
			i1480u_tx_unlink_urbs(i1480u);
			wlp_reset_all(&i1480u->wlp);
		}
		break;
	}
	i1480u_tx_destroy(i1480u, wtx);
	/* Basic flow control: restart the queue once the number of
	 * in-flight URBs falls back to the configured threshold
	 * (threshold == 0 disables flow control entirely). */
	if (atomic_dec_return(&i1480u->tx_inflight.count)
	    <= i1480u->tx_inflight.threshold
	    && netif_queue_stopped(net_dev)
	    && i1480u->tx_inflight.threshold != 0) {
		if (d_test(2) && printk_ratelimit())
			d_printf(2, dev, "Restart queue. \n");
		netif_start_queue(net_dev);
		atomic_inc(&i1480u->tx_inflight.restart_count);
	}
	return;
}
159
160
/**
 * Given a buffer that doesn't fit in a single fragment, create an
 * scatter/gather structure for delivery to the USB pipe.
 *
 * Implements functionality of i1480u_tx_create().
 *
 * @wtx: tx descriptor
 * @skb: skb to send
 * @gfp_mask: gfp allocation mask
 * @returns: 0 if ok, < 0 errno code on error.
 *
 * Sorry, TOO LONG a function, but breaking it up is kind of hard
 *
 * This will break the buffer in chunks smaller than
 * i1480u_MAX_FRG_SIZE (including the header) and add proper headers
 * to each:
 *
 *   1st header           \
 *   i1480 tx header      |  fragment 1
 *   fragment data        /
 *   nxt header           \  fragment 2
 *   fragment data        /
 *   ..
 *   ..
 *   last header          \  fragment 3
 *   last fragment data   /
 *
 * This does not fill the i1480 TX header, it is left up to the
 * caller to do that; you can get it from @wtx->wlp_tx_hdr.
 *
 * This function consumes the skb unless there is an error.
 */
static
int i1480u_tx_create_n(struct i1480u_tx *wtx, struct sk_buff *skb,
		       gfp_t gfp_mask)
{
	int result;
	void *pl;
	size_t pl_size;

	void *pl_itr, *buf_itr;
	size_t pl_size_left, frgs, pl_size_1st, frg_pl_size = 0;
	struct untd_hdr_1st *untd_hdr_1st;
	struct wlp_tx_hdr *wlp_tx_hdr;
	struct untd_hdr_rst *untd_hdr_rst;

	wtx->skb = NULL;	/* payload is copied; the skb is freed at the end */
	pl = skb->data;
	pl_itr = pl;
	pl_size = skb->len;
	pl_size_left = pl_size;	/* payload size */
	/* First fragment; fits as much as i1480u_MAX_FRG_SIZE minus
	 * the headers */
	pl_size_1st = i1480u_MAX_FRG_SIZE
		- sizeof(struct untd_hdr_1st) - sizeof(struct wlp_tx_hdr);
	/* Caller only routes here when the payload exceeds a single
	 * fragment, so the first fragment must be full. */
	BUG_ON(pl_size_1st > pl_size);
	pl_size_left -= pl_size_1st;
	/* The rest have an smaller header (no i1480 TX header). We
	 * need to break up the payload in blocks smaller than
	 * i1480u_MAX_PL_SIZE (payload excluding header). */
	frgs = (pl_size_left + i1480u_MAX_PL_SIZE - 1) / i1480u_MAX_PL_SIZE;
	/* Allocate space for the new buffer. In this new buffer we'll
	 * place the headers followed by the data fragment, headers,
	 * data fragments, etc..
	 */
	result = -ENOMEM;
	wtx->buf_size = sizeof(*untd_hdr_1st)
		+ sizeof(*wlp_tx_hdr)
		+ frgs * sizeof(*untd_hdr_rst)
		+ pl_size;
	wtx->buf = kmalloc(wtx->buf_size, gfp_mask);
	if (wtx->buf == NULL)
		goto error_buf_alloc;

	buf_itr = wtx->buf;	/* We got the space, let's fill it up */
	/* Fill 1st fragment */
	untd_hdr_1st = buf_itr;
	buf_itr += sizeof(*untd_hdr_1st);
	untd_hdr_set_type(&untd_hdr_1st->hdr, i1480u_PKT_FRAG_1ST);
	untd_hdr_set_rx_tx(&untd_hdr_1st->hdr, 0);
	/* 1st header's len covers the whole packet incl. the WLP TX hdr. */
	untd_hdr_1st->hdr.len = cpu_to_le16(pl_size + sizeof(*wlp_tx_hdr));
	untd_hdr_1st->fragment_len =
		cpu_to_le16(pl_size_1st + sizeof(*wlp_tx_hdr));
	memset(untd_hdr_1st->padding, 0, sizeof(untd_hdr_1st->padding));
	/* Set up i1480 header info */
	wlp_tx_hdr = wtx->wlp_tx_hdr = buf_itr;	/* left for the caller to fill */
	buf_itr += sizeof(*wlp_tx_hdr);
	/* Copy the first fragment */
	memcpy(buf_itr, pl_itr, pl_size_1st);
	pl_itr += pl_size_1st;
	buf_itr += pl_size_1st;

	/* Now do each remaining fragment */
	result = -EINVAL;
	while (pl_size_left > 0) {
		d_printf(5, NULL, "ITR HDR: pl_size_left %zu buf_itr %zu\n",
			 pl_size_left, buf_itr - wtx->buf);
		/* Defensive overflow check against the size computed above. */
		if (buf_itr + sizeof(*untd_hdr_rst) - wtx->buf
		    > wtx->buf_size) {
			printk(KERN_ERR "BUG: no space for header\n");
			goto error_bug;
		}
		d_printf(5, NULL, "ITR HDR 2: pl_size_left %zu buf_itr %zu\n",
			 pl_size_left, buf_itr - wtx->buf);
		untd_hdr_rst = buf_itr;
		buf_itr += sizeof(*untd_hdr_rst);
		/* A full-size chunk is a NXT fragment; the final,
		 * possibly short, chunk is the LST fragment. */
		if (pl_size_left > i1480u_MAX_PL_SIZE) {
			frg_pl_size = i1480u_MAX_PL_SIZE;
			untd_hdr_set_type(&untd_hdr_rst->hdr, i1480u_PKT_FRAG_NXT);
		} else {
			frg_pl_size = pl_size_left;
			untd_hdr_set_type(&untd_hdr_rst->hdr, i1480u_PKT_FRAG_LST);
		}
		d_printf(5, NULL,
			 "ITR PL: pl_size_left %zu buf_itr %zu frg_pl_size %zu\n",
			 pl_size_left, buf_itr - wtx->buf, frg_pl_size);
		untd_hdr_set_rx_tx(&untd_hdr_rst->hdr, 0);
		untd_hdr_rst->hdr.len = cpu_to_le16(frg_pl_size);
		untd_hdr_rst->padding = 0;
		if (buf_itr + frg_pl_size - wtx->buf
		    > wtx->buf_size) {
			printk(KERN_ERR "BUG: no space for payload\n");
			goto error_bug;
		}
		memcpy(buf_itr, pl_itr, frg_pl_size);
		buf_itr += frg_pl_size;
		pl_itr += frg_pl_size;
		pl_size_left -= frg_pl_size;
		d_printf(5, NULL,
			 "ITR PL 2: pl_size_left %zu buf_itr %zu frg_pl_size %zu\n",
			 pl_size_left, buf_itr - wtx->buf, frg_pl_size);
	}
	dev_kfree_skb_irq(skb);	/* payload fully copied; skb consumed */
	return 0;

error_bug:
	printk(KERN_ERR
	       "BUG: skb %u bytes\n"
	       "BUG: frg_pl_size %zd i1480u_MAX_FRG_SIZE %u\n"
	       "BUG: buf_itr %zu buf_size %zu pl_size_left %zu\n",
	       skb->len,
	       frg_pl_size, i1480u_MAX_FRG_SIZE,
	       buf_itr - wtx->buf, wtx->buf_size, pl_size_left);

	kfree(wtx->buf);
error_buf_alloc:
	return result;
}
309
310
/**
 * Given a buffer that fits in a single fragment, fill out a @wtx
 * struct for transmitting it down the USB pipe.
 *
 * Uses the fact that we have space reserved in front of the skbuff
 * for hardware headers :]
 *
 * This does not fill the i1480 TX header, it is left up to the
 * caller to do that; you can get it from @wtx->wlp_tx_hdr.
 *
 * @wtx: tx descriptor to fill out
 * @skb: skb whose data is the payload
 * @gfp_mask: unused here (no allocation; kept for symmetry with
 *            i1480u_tx_create_n())
 * @returns: always 0
 *
 * This function does not consume the @skb.
 */
static
int i1480u_tx_create_1(struct i1480u_tx *wtx, struct sk_buff *skb,
		       gfp_t gfp_mask)
{
	struct untd_hdr_cmp *untd_hdr_cmp;
	struct wlp_tx_hdr *wlp_tx_hdr;

	wtx->buf = NULL;	/* no separate buffer; TX straight from the skb */
	wtx->skb = skb;
	/* Push the WLP TX header, then the UNTD "complete" header, into
	 * the headroom the net stack reserved for us. */
	BUG_ON(skb_headroom(skb) < sizeof(*wlp_tx_hdr));
	wlp_tx_hdr = (void *) __skb_push(skb, sizeof(*wlp_tx_hdr));
	wtx->wlp_tx_hdr = wlp_tx_hdr;
	BUG_ON(skb_headroom(skb) < sizeof(*untd_hdr_cmp));
	untd_hdr_cmp = (void *) __skb_push(skb, sizeof(*untd_hdr_cmp));

	untd_hdr_set_type(&untd_hdr_cmp->hdr, i1480u_PKT_FRAG_CMP);
	untd_hdr_set_rx_tx(&untd_hdr_cmp->hdr, 0);
	untd_hdr_cmp->hdr.len = cpu_to_le16(skb->len - sizeof(*untd_hdr_cmp));
	untd_hdr_cmp->padding = 0;
	return 0;
}
347
348
349/**
350 * Given a skb to transmit, massage it to become palatable for the TX pipe
351 *
352 * This will break the buffer in chunks smaller than
353 * i1480u_MAX_FRG_SIZE and add proper headers to each.
354 *
355 * 1st header \
356 * i1480 tx header | fragment 1
357 * fragment data /
358 * nxt header \ fragment 2
359 * fragment data /
360 * ..
361 * ..
362 * last header \ fragment 3
363 * last fragment data /
364 *
365 * Each fragment will be always smaller or equal to i1480u_MAX_FRG_SIZE.
366 *
367 * If the first fragment is smaller than i1480u_MAX_FRG_SIZE, then the
368 * following is composed:
369 *
370 * complete header \
371 * i1480 tx header | single fragment
372 * packet data /
373 *
374 * We were going to use s/g support, but because the interface is
375 * synch and at the end there is plenty of overhead to do it, it
376 * didn't seem that worth for data that is going to be smaller than
377 * one page.
378 */
379static
380struct i1480u_tx *i1480u_tx_create(struct i1480u *i1480u,
381 struct sk_buff *skb, gfp_t gfp_mask)
382{
383 int result;
384 struct usb_endpoint_descriptor *epd;
385 int usb_pipe;
386 unsigned long flags;
387
388 struct i1480u_tx *wtx;
389 const size_t pl_max_size =
390 i1480u_MAX_FRG_SIZE - sizeof(struct untd_hdr_cmp)
391 - sizeof(struct wlp_tx_hdr);
392
393 wtx = kmalloc(sizeof(*wtx), gfp_mask);
394 if (wtx == NULL)
395 goto error_wtx_alloc;
396 wtx->urb = usb_alloc_urb(0, gfp_mask);
397 if (wtx->urb == NULL)
398 goto error_urb_alloc;
399 epd = &i1480u->usb_iface->cur_altsetting->endpoint[2].desc;
400 usb_pipe = usb_sndbulkpipe(i1480u->usb_dev, epd->bEndpointAddress);
401 /* Fits in a single complete packet or need to split? */
402 if (skb->len > pl_max_size) {
403 result = i1480u_tx_create_n(wtx, skb, gfp_mask);
404 if (result < 0)
405 goto error_create;
406 usb_fill_bulk_urb(wtx->urb, i1480u->usb_dev, usb_pipe,
407 wtx->buf, wtx->buf_size, i1480u_tx_cb, wtx);
408 } else {
409 result = i1480u_tx_create_1(wtx, skb, gfp_mask);
410 if (result < 0)
411 goto error_create;
412 usb_fill_bulk_urb(wtx->urb, i1480u->usb_dev, usb_pipe,
413 skb->data, skb->len, i1480u_tx_cb, wtx);
414 }
415 spin_lock_irqsave(&i1480u->tx_list_lock, flags);
416 list_add(&wtx->list_node, &i1480u->tx_list);
417 spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
418 return wtx;
419
420error_create:
421 kfree(wtx->urb);
422error_urb_alloc:
423 kfree(wtx);
424error_wtx_alloc:
425 return NULL;
426}
427
/**
 * Actual fragmentation and transmission of frame
 *
 * @wlp: WLP substack data structure
 * @skb: To be transmitted
 * @dst: Device address of destination
 * @returns: 0 on success, <0 on failure
 *
 * This function can also be called directly (not just from
 * hard_start_xmit), so we also check here if the interface is up before
 * taking sending anything.
 */
int i1480u_xmit_frame(struct wlp *wlp, struct sk_buff *skb,
		      struct uwb_dev_addr *dst)
{
	int result = -ENXIO;
	struct i1480u *i1480u = container_of(wlp, struct i1480u, wlp);
	struct device *dev = &i1480u->usb_iface->dev;
	struct net_device *net_dev = i1480u->net_dev;
	struct i1480u_tx *wtx;
	struct wlp_tx_hdr *wlp_tx_hdr;
	/* 0xff:0xff is the broadcast UWB device address. */
	static unsigned char dev_bcast[2] = { 0xff, 0xff };
#if 0
	int lockup = 50;
#endif

	d_fnstart(6, dev, "(skb %p (%u), net_dev %p)\n", skb, skb->len,
		  net_dev);
	BUG_ON(i1480u->wlp.rc == NULL);
	if ((net_dev->flags & IFF_UP) == 0)
		goto out;
	/* Basic flow control: refuse (and stop the queue) when too many
	 * TX URBs are already in flight; i1480u_tx_cb() restarts us. */
	result = -EBUSY;
	if (atomic_read(&i1480u->tx_inflight.count) >= i1480u->tx_inflight.max) {
		if (d_test(2) && printk_ratelimit())
			d_printf(2, dev, "Max frames in flight "
				 "stopping queue.\n");
		netif_stop_queue(net_dev);
		goto error_max_inflight;
	}
	result = -ENOMEM;
	wtx = i1480u_tx_create(i1480u, skb, GFP_ATOMIC);
	if (unlikely(wtx == NULL)) {
		if (printk_ratelimit())
			dev_err(dev, "TX: no memory for WLP TX URB,"
				"dropping packet (in flight %d)\n",
				atomic_read(&i1480u->tx_inflight.count));
		netif_stop_queue(net_dev);
		goto error_wtx_alloc;
	}
	wtx->i1480u = i1480u;
	/* Fill out the i1480 header; @i1480u->def_tx_hdr read without
	 * locking. We do so because they are kind of orthogonal to
	 * each other (and thus not changed in an atomic batch).
	 * The ETH header is right after the WLP TX header. */
	wlp_tx_hdr = wtx->wlp_tx_hdr;
	*wlp_tx_hdr = i1480u->options.def_tx_hdr;
	wlp_tx_hdr->dstaddr = *dst;
	if (!memcmp(&wlp_tx_hdr->dstaddr, dev_bcast, sizeof(dev_bcast))
	    && (wlp_tx_hdr_delivery_id_type(wlp_tx_hdr) & WLP_DRP)) {
		/*Broadcast message directed to DRP host. Send as best effort
		 * on PCA. */
		wlp_tx_hdr_set_delivery_id_type(wlp_tx_hdr, i1480u->options.pca_base_priority);
	}

#if 0
	dev_info(dev, "TX delivering skb -> USB, %zu bytes\n", skb->len);
	dump_bytes(dev, skb->data, skb->len > 72 ? 72 : skb->len);
#endif
#if 0
	/* simulates a device lockup after every lockup# packets */
	if (lockup && ((i1480u->stats.tx_packets + 1) % lockup) == 0) {
		/* Simulate a dropped transmit interrupt */
		net_dev->trans_start = jiffies;
		netif_stop_queue(net_dev);
		dev_err(dev, "Simulate lockup at %ld\n", jiffies);
		return result;
	}
#endif

	result = usb_submit_urb(wtx->urb, GFP_ATOMIC);	/* Go baby */
	if (result < 0) {
		dev_err(dev, "TX: cannot submit URB: %d\n", result);
		/* We leave the freeing of skb to calling function */
		wtx->skb = NULL;
		goto error_tx_urb_submit;
	}
	atomic_inc(&i1480u->tx_inflight.count);
	net_dev->trans_start = jiffies;
	d_fnend(6, dev, "(skb %p (%u), net_dev %p) = %d\n", skb, skb->len,
		net_dev, result);
	return result;

error_tx_urb_submit:
	i1480u_tx_destroy(i1480u, wtx);
error_wtx_alloc:
error_max_inflight:
out:
	d_fnend(6, dev, "(skb %p (%u), net_dev %p) = %d\n", skb, skb->len,
		net_dev, result);
	return result;
}
529
530
531/**
532 * Transmit an skb Called when an skbuf has to be transmitted
533 *
534 * The skb is first passed to WLP substack to ensure this is a valid
535 * frame. If valid the device address of destination will be filled and
536 * the WLP header prepended to the skb. If this step fails we fake sending
537 * the frame, if we return an error the network stack will just keep trying.
538 *
539 * Broadcast frames inside a WSS needs to be treated special as multicast is
540 * not supported. A broadcast frame is sent as unicast to each member of the
541 * WSS - this is done by the WLP substack when it finds a broadcast frame.
542 * So, we test if the WLP substack took over the skb and only transmit it
543 * if it has not (been taken over).
544 *
545 * @net_dev->xmit_lock is held
546 */
547int i1480u_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
548{
549 int result;
550 struct i1480u *i1480u = netdev_priv(net_dev);
551 struct device *dev = &i1480u->usb_iface->dev;
552 struct uwb_dev_addr dst;
553
554 d_fnstart(6, dev, "(skb %p (%u), net_dev %p)\n", skb, skb->len,
555 net_dev);
556 BUG_ON(i1480u->wlp.rc == NULL);
557 if ((net_dev->flags & IFF_UP) == 0)
558 goto error;
559 result = wlp_prepare_tx_frame(dev, &i1480u->wlp, skb, &dst);
560 if (result < 0) {
561 dev_err(dev, "WLP verification of TX frame failed (%d). "
562 "Dropping packet.\n", result);
563 goto error;
564 } else if (result == 1) {
565 d_printf(6, dev, "WLP will transmit frame. \n");
566 /* trans_start time will be set when WLP actually transmits
567 * the frame */
568 goto out;
569 }
570 d_printf(6, dev, "Transmitting frame. \n");
571 result = i1480u_xmit_frame(&i1480u->wlp, skb, &dst);
572 if (result < 0) {
573 dev_err(dev, "Frame TX failed (%d).\n", result);
574 goto error;
575 }
576 d_fnend(6, dev, "(skb %p (%u), net_dev %p) = %d\n", skb, skb->len,
577 net_dev, result);
578 return NETDEV_TX_OK;
579error:
580 dev_kfree_skb_any(skb);
581 i1480u->stats.tx_dropped++;
582out:
583 d_fnend(6, dev, "(skb %p (%u), net_dev %p) = %d\n", skb, skb->len,
584 net_dev, result);
585 return NETDEV_TX_OK;
586}
587
588
/**
 * Called when a pkt transmission doesn't complete in a reasonable period
 * Device reset may sleep - do it outside of interrupt context (delayed)
 *
 * net_device watchdog hook: delegate recovery to a full WLP reset.
 */
void i1480u_tx_timeout(struct net_device *net_dev)
{
	struct i1480u *i1480u = netdev_priv(net_dev);

	wlp_reset_all(&i1480u->wlp);
}
599
600
/*
 * Cancel all pending TX URBs and wait for their completion callbacks
 * to empty tx_list.  Called from disconnect, so sleeping is allowed.
 */
void i1480u_tx_release(struct i1480u *i1480u)
{
	unsigned long flags;
	struct i1480u_tx *wtx, *next;
	int count = 0, empty;

	spin_lock_irqsave(&i1480u->tx_list_lock, flags);
	list_for_each_entry_safe(wtx, next, &i1480u->tx_list, list_node) {
		count++;
		usb_unlink_urb(wtx->urb);
	}
	spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
	count = count*10;	/* wait 200ms per unlinked urb (intervals of 20ms) */
	/*
	 * We don't like this solution too much (dirty as it is), but
	 * it is cheaper than putting a refcount on each i1480u_tx and
	 * waiting for all of them to go away...
	 *
	 * Called when no more packets can be added to tx_list
	 * so can wait for it to be empty.
	 */
	while (1) {
		spin_lock_irqsave(&i1480u->tx_list_lock, flags);
		empty = list_empty(&i1480u->tx_list);
		spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
		if (empty)
			break;
		count--;
		/* If the budget runs out, a completion never fired: bug. */
		BUG_ON(count == 0);
		msleep(20);
	}
}
diff --git a/drivers/uwb/ie.c b/drivers/uwb/ie.c
new file mode 100644
index 000000000000..cf6f3d152b9d
--- /dev/null
+++ b/drivers/uwb/ie.c
@@ -0,0 +1,541 @@
1/*
2 * Ultra Wide Band
3 * Information Element Handling
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 * Reinette Chatre <reinette.chatre@intel.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License version
11 * 2 as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21 * 02110-1301, USA.
22 *
23 *
24 * FIXME: docs
25 */
26
27#include "uwb-internal.h"
28#define D_LOCAL 0
29#include <linux/uwb/debug.h>
30
31/**
32 * uwb_ie_next - get the next IE in a buffer
33 * @ptr: start of the buffer containing the IE data
34 * @len: length of the buffer
35 *
36 * Both @ptr and @len are updated so subsequent calls to uwb_ie_next()
37 * will get the next IE.
38 *
39 * NULL is returned (and @ptr and @len will not be updated) if there
40 * are no more IEs in the buffer or the buffer is too short.
41 */
42struct uwb_ie_hdr *uwb_ie_next(void **ptr, size_t *len)
43{
44 struct uwb_ie_hdr *hdr;
45 size_t ie_len;
46
47 if (*len < sizeof(struct uwb_ie_hdr))
48 return NULL;
49
50 hdr = *ptr;
51 ie_len = sizeof(struct uwb_ie_hdr) + hdr->length;
52
53 if (*len < ie_len)
54 return NULL;
55
56 *ptr += ie_len;
57 *len -= ie_len;
58
59 return hdr;
60}
61EXPORT_SYMBOL_GPL(uwb_ie_next);
62
63/**
64 * Get the IEs that a radio controller is sending in its beacon
65 *
66 * @uwb_rc: UWB Radio Controller
67 * @returns: Size read from the system
68 *
69 * We don't need to lock the uwb_rc's mutex because we don't modify
70 * anything. Once done with the iedata buffer, call
71 * uwb_rc_ie_release(iedata). Don't call kfree on it.
72 */
73ssize_t uwb_rc_get_ie(struct uwb_rc *uwb_rc, struct uwb_rc_evt_get_ie **pget_ie)
74{
75 ssize_t result;
76 struct device *dev = &uwb_rc->uwb_dev.dev;
77 struct uwb_rccb *cmd = NULL;
78 struct uwb_rceb *reply = NULL;
79 struct uwb_rc_evt_get_ie *get_ie;
80
81 d_fnstart(3, dev, "(%p, %p)\n", uwb_rc, pget_ie);
82 result = -ENOMEM;
83 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
84 if (cmd == NULL)
85 goto error_kzalloc;
86 cmd->bCommandType = UWB_RC_CET_GENERAL;
87 cmd->wCommand = cpu_to_le16(UWB_RC_CMD_GET_IE);
88 result = uwb_rc_vcmd(uwb_rc, "GET_IE", cmd, sizeof(*cmd),
89 UWB_RC_CET_GENERAL, UWB_RC_CMD_GET_IE,
90 &reply);
91 if (result < 0)
92 goto error_cmd;
93 get_ie = container_of(reply, struct uwb_rc_evt_get_ie, rceb);
94 if (result < sizeof(*get_ie)) {
95 dev_err(dev, "not enough data returned for decoding GET IE "
96 "(%zu bytes received vs %zu needed)\n",
97 result, sizeof(*get_ie));
98 result = -EINVAL;
99 } else if (result < sizeof(*get_ie) + le16_to_cpu(get_ie->wIELength)) {
100 dev_err(dev, "not enough data returned for decoding GET IE "
101 "payload (%zu bytes received vs %zu needed)\n", result,
102 sizeof(*get_ie) + le16_to_cpu(get_ie->wIELength));
103 result = -EINVAL;
104 } else
105 *pget_ie = get_ie;
106error_cmd:
107 kfree(cmd);
108error_kzalloc:
109 d_fnend(3, dev, "(%p, %p) = %d\n", uwb_rc, pget_ie, (int)result);
110 return result;
111}
112EXPORT_SYMBOL_GPL(uwb_rc_get_ie);
113
114
115/*
116 * Given a pointer to an IE, print it in ASCII/hex followed by a new line
117 *
118 * @ie_hdr: pointer to the IE header. Length is in there, and it is
119 * guaranteed that the ie_hdr->length bytes following it are
 *           safely accessible.
121 *
122 * @_data: context data passed from uwb_ie_for_each(), an struct output_ctx
123 */
124int uwb_ie_dump_hex(struct uwb_dev *uwb_dev, const struct uwb_ie_hdr *ie_hdr,
125 size_t offset, void *_ctx)
126{
127 struct uwb_buf_ctx *ctx = _ctx;
128 const u8 *pl = (void *)(ie_hdr + 1);
129 u8 pl_itr;
130
131 ctx->bytes += scnprintf(ctx->buf + ctx->bytes, ctx->size - ctx->bytes,
132 "%02x %02x ", (unsigned) ie_hdr->element_id,
133 (unsigned) ie_hdr->length);
134 pl_itr = 0;
135 while (pl_itr < ie_hdr->length && ctx->bytes < ctx->size)
136 ctx->bytes += scnprintf(ctx->buf + ctx->bytes,
137 ctx->size - ctx->bytes,
138 "%02x ", (unsigned) pl[pl_itr++]);
139 if (ctx->bytes < ctx->size)
140 ctx->buf[ctx->bytes++] = '\n';
141 return 0;
142}
143EXPORT_SYMBOL_GPL(uwb_ie_dump_hex);
144
145
146/**
147 * Verify that a pointer in a buffer points to valid IE
148 *
149 * @start: pointer to start of buffer in which IE appears
150 * @itr: pointer to IE inside buffer that will be verified
151 * @top: pointer to end of buffer
152 *
153 * @returns: 0 if IE is valid, <0 otherwise
154 *
155 * Verification involves checking that the buffer can contain a
156 * header and the amount of data reported in the IE header can be found in
157 * the buffer.
158 */
static
int uwb_rc_ie_verify(struct uwb_dev *uwb_dev, const void *start,
		     const void *itr, const void *top)
{
	struct device *dev = &uwb_dev->dev;
	const struct uwb_ie_hdr *ie_hdr;

	/* Room left for a complete header?  top - itr is a signed
	 * ptrdiff_t; callers guarantee itr <= top, so comparing it against
	 * the unsigned sizeof is safe here. */
	if (top - itr < sizeof(*ie_hdr)) {
		dev_err(dev, "Bad IE: no data to decode header "
			"(%zu bytes left vs %zu needed) at offset %zu\n",
			top - itr, sizeof(*ie_hdr), itr - start);
		return -EINVAL;
	}
	ie_hdr = itr;
	itr += sizeof(*ie_hdr);
	/* Does the payload length announced in the header fit in what is
	 * actually left in the buffer? */
	if (top - itr < ie_hdr->length) {
		dev_err(dev, "Bad IE: not enough data for payload "
			"(%zu bytes left vs %zu needed) at offset %zu\n",
			top - itr, (size_t)ie_hdr->length,
			(void *)ie_hdr - start);
		return -EINVAL;
	}
	return 0;
}
183
184
185/**
 * Walk a buffer filled with consecutive IEs
187 *
188 * @uwb_dev: UWB device this IEs belong to (for err messages mainly)
189 *
190 * @fn: function to call with each IE; if it returns 0, we keep
191 * traversing the buffer. If it returns !0, we'll stop and return
192 * that value.
193 *
194 * @data: pointer passed to @fn
195 *
196 * @buf: buffer where the consecutive IEs are located
197 *
198 * @size: size of @buf
199 *
200 * Each IE is checked for basic correctness (there is space left for
201 * the header and the payload). If that test is failed, we stop
202 * processing. For every good IE, @fn is called.
203 */
204ssize_t uwb_ie_for_each(struct uwb_dev *uwb_dev, uwb_ie_f fn, void *data,
205 const void *buf, size_t size)
206{
207 ssize_t result = 0;
208 const struct uwb_ie_hdr *ie_hdr;
209 const void *itr = buf, *top = itr + size;
210
211 while (itr < top) {
212 if (uwb_rc_ie_verify(uwb_dev, buf, itr, top) != 0)
213 break;
214 ie_hdr = itr;
215 itr += sizeof(*ie_hdr) + ie_hdr->length;
216 result = fn(uwb_dev, ie_hdr, itr - buf, data);
217 if (result != 0)
218 break;
219 }
220 return result;
221}
222EXPORT_SYMBOL_GPL(uwb_ie_for_each);
223
224
225/**
226 * Replace all IEs currently being transmitted by a device
227 *
228 * @cmd: pointer to the SET-IE command with the IEs to set
229 * @size: size of @buf
230 */
231int uwb_rc_set_ie(struct uwb_rc *rc, struct uwb_rc_cmd_set_ie *cmd)
232{
233 int result;
234 struct device *dev = &rc->uwb_dev.dev;
235 struct uwb_rc_evt_set_ie reply;
236
237 reply.rceb.bEventType = UWB_RC_CET_GENERAL;
238 reply.rceb.wEvent = UWB_RC_CMD_SET_IE;
239 result = uwb_rc_cmd(rc, "SET-IE", &cmd->rccb,
240 sizeof(*cmd) + le16_to_cpu(cmd->wIELength),
241 &reply.rceb, sizeof(reply));
242 if (result < 0)
243 goto error_cmd;
244 else if (result != sizeof(reply)) {
245 dev_err(dev, "SET-IE: not enough data to decode reply "
246 "(%d bytes received vs %zu needed)\n",
247 result, sizeof(reply));
248 result = -EIO;
249 } else if (reply.bResultCode != UWB_RC_RES_SUCCESS) {
250 dev_err(dev, "SET-IE: command execution failed: %s (%d)\n",
251 uwb_rc_strerror(reply.bResultCode), reply.bResultCode);
252 result = -EIO;
253 } else
254 result = 0;
255error_cmd:
256 return result;
257}
258
259/**
260 * Determine by IE id if IE is host settable
261 * WUSB 1.0 [8.6.2.8 Table 8.85]
262 *
263 * EXCEPTION:
264 * All but UWB_IE_WLP appears in Table 8.85 from WUSB 1.0. Setting this IE
265 * is required for the WLP substack to perform association with its WSS so
266 * we hope that the WUSB spec will be changed to reflect this.
267 */
268static
269int uwb_rc_ie_is_host_settable(enum uwb_ie element_id)
270{
271 if (element_id == UWB_PCA_AVAILABILITY ||
272 element_id == UWB_BP_SWITCH_IE ||
273 element_id == UWB_MAC_CAPABILITIES_IE ||
274 element_id == UWB_PHY_CAPABILITIES_IE ||
275 element_id == UWB_APP_SPEC_PROBE_IE ||
276 element_id == UWB_IDENTIFICATION_IE ||
277 element_id == UWB_MASTER_KEY_ID_IE ||
278 element_id == UWB_IE_WLP ||
279 element_id == UWB_APP_SPEC_IE)
280 return 1;
281 return 0;
282}
283
284
285/**
286 * Extract Host Settable IEs from IE
287 *
288 * @ie_data: pointer to buffer containing all IEs
289 * @size: size of buffer
290 *
291 * @returns: length of buffer that only includes host settable IEs
292 *
293 * Given a buffer of IEs we move all Host Settable IEs to front of buffer
294 * by overwriting the IEs that are not Host Settable.
295 * Buffer length is adjusted accordingly.
296 */
static
ssize_t uwb_rc_parse_host_settable_ie(struct uwb_dev *uwb_dev,
				      void *ie_data, size_t size)
{
	size_t new_len = size;
	struct uwb_ie_hdr *ie_hdr;
	size_t ie_length;
	void *itr = ie_data, *top = itr + size;

	while (itr < top) {
		/* Stop compacting at the first malformed IE. */
		if (uwb_rc_ie_verify(uwb_dev, ie_data, itr, top) != 0)
			break;
		ie_hdr = itr;
		ie_length = sizeof(*ie_hdr) + ie_hdr->length;
		if (uwb_rc_ie_is_host_settable(ie_hdr->element_id)) {
			itr += ie_length;	/* keep it; skip ahead */
		} else {
			/* Drop it: shift the remainder of the buffer down
			 * over this IE.  itr stays put so the IE that just
			 * moved into this slot is examined next. */
			memmove(itr, itr + ie_length, top - (itr + ie_length));
			new_len -= ie_length;
			top -= ie_length;
		}
	}
	return new_len;
}
321
322
/* Initialize the IE management subsystem for this radio controller */
void uwb_rc_ie_init(struct uwb_rc *uwb_rc)
{
	mutex_init(&uwb_rc->ies_mutex);
}
328
329
330/**
331 * Set up cache for host settable IEs currently being transmitted
332 *
333 * First we just call GET-IE to get the current IEs being transmitted
334 * (or we workaround and pretend we did) and (because the format is
335 * the same) reuse that as the IE cache (with the command prefix, as
336 * explained in 'struct uwb_rc').
337 *
338 * @returns: size of cache created
339 */
ssize_t uwb_rc_ie_setup(struct uwb_rc *uwb_rc)
{
	struct device *dev = &uwb_rc->uwb_dev.dev;
	ssize_t result;
	size_t capacity;
	struct uwb_rc_evt_get_ie *ie_info;

	d_fnstart(3, dev, "(%p)\n", uwb_rc);
	mutex_lock(&uwb_rc->ies_mutex);
	/* GET-IE hands us an allocated event buffer; we keep it and reuse
	 * it as the cached SET-IE command (see 'struct uwb_rc'). */
	result = uwb_rc_get_ie(uwb_rc, &ie_info);
	if (result < 0)
		goto error_get_ie;
	capacity = result;
	d_printf(5, dev, "Got IEs %zu bytes (%zu long at %p)\n", result,
		 (size_t)le16_to_cpu(ie_info->wIELength), ie_info);

	/* Remove IEs that host should not set. */
	result = uwb_rc_parse_host_settable_ie(&uwb_rc->uwb_dev,
			ie_info->IEData, le16_to_cpu(ie_info->wIELength));
	if (result < 0)
		/* NOTE(review): ie_info is not freed on this path; the
		 * parser currently never returns < 0, but if it ever does
		 * this would leak -- confirm. */
		goto error_parse;
	d_printf(5, dev, "purged non-settable IEs to %zu bytes\n", result);
	/* Rewrite the event prefix into a SET-IE command prefix; the rest
	 * of the layout is shared between the two. */
	uwb_rc->ies = (void *) ie_info;
	uwb_rc->ies->rccb.bCommandType = UWB_RC_CET_GENERAL;
	uwb_rc->ies->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_SET_IE);
	uwb_rc->ies_capacity = capacity;
	d_printf(5, dev, "IE cache at %p %zu bytes, %zu capacity\n",
		 ie_info, result, capacity);
	result = 0;
error_parse:
error_get_ie:
	mutex_unlock(&uwb_rc->ies_mutex);
	d_fnend(3, dev, "(%p) = %zu\n", uwb_rc, result);
	return result;
}
375
376
/* Cleanup the whole IE management subsystem; frees the cached SET-IE
 * command set up by uwb_rc_ie_setup(). */
void uwb_rc_ie_release(struct uwb_rc *uwb_rc)
{
	kfree(uwb_rc->ies);
	uwb_rc->ies = NULL;
	uwb_rc->ies_capacity = 0;
}
384
385
386static
387int __acc_size(struct uwb_dev *uwb_dev, const struct uwb_ie_hdr *ie_hdr,
388 size_t offset, void *_ctx)
389{
390 size_t *acc_size = _ctx;
391 *acc_size += sizeof(*ie_hdr) + ie_hdr->length;
392 d_printf(6, &uwb_dev->dev, "new acc size %zu\n", *acc_size);
393 return 0;
394}
395
396
397/**
398 * Add a new IE to IEs currently being transmitted by device
399 *
400 * @ies: the buffer containing the new IE or IEs to be added to
401 * the device's beacon. The buffer will be verified for
402 * consistence (meaning the headers should be right) and
403 * consistent with the buffer size.
404 * @size: size of @ies (in bytes, total buffer size)
405 * @returns: 0 if ok, <0 errno code on error
406 *
407 * According to WHCI 0.95 [4.13.6] the driver will only receive the RCEB
408 * after the device sent the first beacon that includes the IEs specified
409 * in the SET IE command. We thus cannot send this command if the device is
410 * not beaconing. Instead, a SET IE command will be sent later right after
411 * we start beaconing.
412 *
413 * Setting an IE on the device will overwrite all current IEs in device. So
414 * we take the current IEs being transmitted by the device, append the
415 * new one, and call SET IE with all the IEs needed.
416 *
417 * The local IE cache will only be updated with the new IE if SET IE
418 * completed successfully.
419 */
int uwb_rc_ie_add(struct uwb_rc *uwb_rc,
		  const struct uwb_ie_hdr *ies, size_t size)
{
	int result = 0;
	struct device *dev = &uwb_rc->uwb_dev.dev;
	struct uwb_rc_cmd_set_ie *new_ies;
	size_t ies_size, total_size, acc_size = 0;

	if (uwb_rc->ies == NULL)
		return -ESHUTDOWN;
	/* Sanity check the caller's buffer: the sum of the sizes the IE
	 * headers declare must match @size exactly. */
	uwb_ie_for_each(&uwb_rc->uwb_dev, __acc_size, &acc_size, ies, size);
	if (acc_size != size) {
		dev_err(dev, "BUG: bad IEs, misconstructed headers "
			"[%zu bytes reported vs %zu calculated]\n",
			size, acc_size);
		WARN_ON(1);
		return -EINVAL;
	}
	mutex_lock(&uwb_rc->ies_mutex);
	ies_size = le16_to_cpu(uwb_rc->ies->wIELength);
	total_size = sizeof(*uwb_rc->ies) + ies_size;
	/* Grow the cache if the appended IEs will not fit. */
	if (total_size + size > uwb_rc->ies_capacity) {
		d_printf(4, dev, "Reallocating IE cache from %p capacity %zu "
			 "to capacity %zu\n", uwb_rc->ies, uwb_rc->ies_capacity,
			 total_size + size);
		new_ies = kzalloc(total_size + size, GFP_KERNEL);
		if (new_ies == NULL) {
			dev_err(dev, "No memory for adding new IE\n");
			result = -ENOMEM;
			goto error_alloc;
		}
		memcpy(new_ies, uwb_rc->ies, total_size);
		uwb_rc->ies_capacity = total_size + size;
		kfree(uwb_rc->ies);
		uwb_rc->ies = new_ies;
		d_printf(4, dev, "New IE cache at %p capacity %zu\n",
			 uwb_rc->ies, uwb_rc->ies_capacity);
	}
	/* Append the new IEs to the cached SET-IE command payload. */
	memcpy((void *)uwb_rc->ies + total_size, ies, size);
	uwb_rc->ies->wIELength = cpu_to_le16(ies_size + size);
	/* Only push to the hardware while beaconing (see the function
	 * comment above); roll the cached length back on failure. */
	if (uwb_rc->beaconing != -1) {
		result = uwb_rc_set_ie(uwb_rc, uwb_rc->ies);
		if (result < 0) {
			dev_err(dev, "Cannot set new IE on device: %d\n",
				result);
			uwb_rc->ies->wIELength = cpu_to_le16(ies_size);
		} else
			result = 0;
	}
	d_printf(4, dev, "IEs now occupy %hu bytes of %zu capacity at %p\n",
		 le16_to_cpu(uwb_rc->ies->wIELength), uwb_rc->ies_capacity,
		 uwb_rc->ies);
error_alloc:
	mutex_unlock(&uwb_rc->ies_mutex);
	return result;
}
EXPORT_SYMBOL_GPL(uwb_rc_ie_add);
477
478
479/*
480 * Remove an IE from internal cache
481 *
482 * We are dealing with our internal IE cache so no need to verify that the
483 * IEs are valid (it has been done already).
484 *
485 * Should be called with ies_mutex held
486 *
487 * We do not break out once an IE is found in the cache. It is currently
488 * possible to have more than one IE with the same ID included in the
489 * beacon. We don't reallocate, we just mark the size smaller.
490 */
491static
492int uwb_rc_ie_cache_rm(struct uwb_rc *uwb_rc, enum uwb_ie to_remove)
493{
494 struct uwb_ie_hdr *ie_hdr;
495 size_t new_len = le16_to_cpu(uwb_rc->ies->wIELength);
496 void *itr = uwb_rc->ies->IEData;
497 void *top = itr + new_len;
498
499 while (itr < top) {
500 ie_hdr = itr;
501 if (ie_hdr->element_id != to_remove) {
502 itr += sizeof(*ie_hdr) + ie_hdr->length;
503 } else {
504 int ie_length;
505 ie_length = sizeof(*ie_hdr) + ie_hdr->length;
506 if (top - itr != ie_length)
507 memmove(itr, itr + ie_length, top - itr + ie_length);
508 top -= ie_length;
509 new_len -= ie_length;
510 }
511 }
512 uwb_rc->ies->wIELength = cpu_to_le16(new_len);
513 return 0;
514}
515
516
517/**
518 * Remove an IE currently being transmitted by device
519 *
520 * @element_id: id of IE to be removed from device's beacon
521 */
522int uwb_rc_ie_rm(struct uwb_rc *uwb_rc, enum uwb_ie element_id)
523{
524 struct device *dev = &uwb_rc->uwb_dev.dev;
525 int result;
526
527 if (uwb_rc->ies == NULL)
528 return -ESHUTDOWN;
529 mutex_lock(&uwb_rc->ies_mutex);
530 result = uwb_rc_ie_cache_rm(uwb_rc, element_id);
531 if (result < 0)
532 dev_err(dev, "Cannot remove IE from cache.\n");
533 if (uwb_rc->beaconing != -1) {
534 result = uwb_rc_set_ie(uwb_rc, uwb_rc->ies);
535 if (result < 0)
536 dev_err(dev, "Cannot set new IE on device.\n");
537 }
538 mutex_unlock(&uwb_rc->ies_mutex);
539 return result;
540}
541EXPORT_SYMBOL_GPL(uwb_rc_ie_rm);
diff --git a/drivers/uwb/lc-dev.c b/drivers/uwb/lc-dev.c
new file mode 100644
index 000000000000..15f856c9689a
--- /dev/null
+++ b/drivers/uwb/lc-dev.c
@@ -0,0 +1,492 @@
1/*
2 * Ultra Wide Band
3 * Life cycle of devices
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * FIXME: docs
24 */
25
26#include <linux/kernel.h>
27#include <linux/device.h>
28#include <linux/err.h>
29#include <linux/kdev_t.h>
30#include <linux/random.h>
31#include "uwb-internal.h"
32
33#define D_LOCAL 1
34#include <linux/uwb/debug.h>
35
36
/* We initialize addresses to 0xff (invalid, as it is bcast) */
static inline void uwb_dev_addr_init(struct uwb_dev_addr *addr)
{
	memset(&addr->data, 0xff, sizeof(addr->data));
}

/* Same invalid-by-default initialization for the EUI-48 MAC address */
static inline void uwb_mac_addr_init(struct uwb_mac_addr *addr)
{
	memset(&addr->data, 0xff, sizeof(addr->data));
}

/* @returns !0 if a device @addr is a broadcast address */
static inline int uwb_dev_addr_bcast(const struct uwb_dev_addr *addr)
{
	static const struct uwb_dev_addr bcast = { .data = { 0xff, 0xff } };
	return !uwb_dev_addr_cmp(addr, &bcast);
}
54
55/*
56 * Add callback @new to be called when an event occurs in @rc.
57 */
58int uwb_notifs_register(struct uwb_rc *rc, struct uwb_notifs_handler *new)
59{
60 if (mutex_lock_interruptible(&rc->notifs_chain.mutex))
61 return -ERESTARTSYS;
62 list_add(&new->list_node, &rc->notifs_chain.list);
63 mutex_unlock(&rc->notifs_chain.mutex);
64 return 0;
65}
66EXPORT_SYMBOL_GPL(uwb_notifs_register);
67
68/*
69 * Remove event handler (callback)
70 */
71int uwb_notifs_deregister(struct uwb_rc *rc, struct uwb_notifs_handler *entry)
72{
73 if (mutex_lock_interruptible(&rc->notifs_chain.mutex))
74 return -ERESTARTSYS;
75 list_del(&entry->list_node);
76 mutex_unlock(&rc->notifs_chain.mutex);
77 return 0;
78}
79EXPORT_SYMBOL_GPL(uwb_notifs_deregister);
80
81/*
82 * Notify all event handlers of a given event on @rc
83 *
84 * We are called with a valid reference to the device, or NULL if the
85 * event is not for a particular event (e.g., a BG join event).
86 */
87void uwb_notify(struct uwb_rc *rc, struct uwb_dev *uwb_dev, enum uwb_notifs event)
88{
89 struct uwb_notifs_handler *handler;
90 if (mutex_lock_interruptible(&rc->notifs_chain.mutex))
91 return;
92 if (!list_empty(&rc->notifs_chain.list)) {
93 list_for_each_entry(handler, &rc->notifs_chain.list, list_node) {
94 handler->cb(handler->data, uwb_dev, event);
95 }
96 }
97 mutex_unlock(&rc->notifs_chain.mutex);
98}
99
100/*
101 * Release the backing device of a uwb_dev that has been dynamically allocated.
102 */
static void uwb_dev_sys_release(struct device *dev)
{
	struct uwb_dev *uwb_dev = to_uwb_dev(dev);

	d_fnstart(4, NULL, "(dev %p uwb_dev %p)\n", dev, uwb_dev);
	/* Drop the beacon cache entry reference taken in uwbd_dev_onair() */
	uwb_bce_put(uwb_dev->bce);
	d_printf(0, &uwb_dev->dev, "uwb_dev %p freed\n", uwb_dev);
	/* Poison the struct before freeing to make use-after-free obvious */
	memset(uwb_dev, 0x69, sizeof(*uwb_dev));
	kfree(uwb_dev);
	d_fnend(4, NULL, "(dev %p uwb_dev %p) = void\n", dev, uwb_dev);
}
114
115/*
116 * Initialize a UWB device instance
117 *
118 * Alloc, zero and call this function.
119 */
void uwb_dev_init(struct uwb_dev *uwb_dev)
{
	mutex_init(&uwb_dev->mutex);
	/* Takes the first device reference; released via ->release */
	device_initialize(&uwb_dev->dev);
	uwb_dev->dev.release = uwb_dev_sys_release;
	/* Both addresses start as all-0xff (invalid/broadcast) */
	uwb_dev_addr_init(&uwb_dev->dev_addr);
	uwb_mac_addr_init(&uwb_dev->mac_addr);
	/* All global streams start out available */
	bitmap_fill(uwb_dev->streams, UWB_NUM_GLOBAL_STREAMS);
}
129
/* sysfs: print the device's EUI-48 (MAC address) */
static ssize_t uwb_dev_EUI_48_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct uwb_dev *uwb_dev = to_uwb_dev(dev);
	char addr[UWB_ADDR_STRSIZE];

	uwb_mac_addr_print(addr, sizeof(addr), &uwb_dev->mac_addr);
	return sprintf(buf, "%s\n", addr);
}
static DEVICE_ATTR(EUI_48, S_IRUGO, uwb_dev_EUI_48_show, NULL);
140
/* sysfs: print the device's 16-bit device address */
static ssize_t uwb_dev_DevAddr_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct uwb_dev *uwb_dev = to_uwb_dev(dev);
	char addr[UWB_ADDR_STRSIZE];

	uwb_dev_addr_print(addr, sizeof(addr), &uwb_dev->dev_addr);
	return sprintf(buf, "%s\n", addr);
}
static DEVICE_ATTR(DevAddr, S_IRUGO, uwb_dev_DevAddr_show, NULL);
151
152/*
153 * Show the BPST of this device.
154 *
 * Calculated from the receive time of the device's beacon and its
156 * slot number.
157 */
static ssize_t uwb_dev_BPST_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct uwb_dev *uwb_dev = to_uwb_dev(dev);
	struct uwb_beca_e *bce;
	struct uwb_beacon_frame *bf;
	u16 bpst;

	bce = uwb_dev->bce;
	/* The beacon cache entry is protected by its own mutex */
	mutex_lock(&bce->mutex);
	bf = (struct uwb_beacon_frame *)bce->be->BeaconInfo;
	/* Back out the slot offset from the receive timestamp to get the
	 * beacon period start time */
	bpst = bce->be->wBPSTOffset
		- (u16)(bf->Beacon_Slot_Number * UWB_BEACON_SLOT_LENGTH_US);
	mutex_unlock(&bce->mutex);

	return sprintf(buf, "%d\n", bpst);
}
static DEVICE_ATTR(BPST, S_IRUGO, uwb_dev_BPST_show, NULL);
176
177/*
178 * Show the IEs a device is beaconing
179 *
180 * We need to access the beacon cache, so we just lock it really
181 * quick, print the IEs and unlock.
182 *
183 * We have a reference on the cache entry, so that should be
184 * quite safe.
185 */
186static ssize_t uwb_dev_IEs_show(struct device *dev,
187 struct device_attribute *attr, char *buf)
188{
189 struct uwb_dev *uwb_dev = to_uwb_dev(dev);
190
191 return uwb_bce_print_IEs(uwb_dev, uwb_dev->bce, buf, PAGE_SIZE);
192}
193static DEVICE_ATTR(IEs, S_IRUGO | S_IWUSR, uwb_dev_IEs_show, NULL);
194
195static ssize_t uwb_dev_LQE_show(struct device *dev,
196 struct device_attribute *attr, char *buf)
197{
198 struct uwb_dev *uwb_dev = to_uwb_dev(dev);
199 struct uwb_beca_e *bce = uwb_dev->bce;
200 size_t result;
201
202 mutex_lock(&bce->mutex);
203 result = stats_show(&uwb_dev->bce->lqe_stats, buf);
204 mutex_unlock(&bce->mutex);
205 return result;
206}
207
/* sysfs: update/reset link quality estimation stats for this device */
static ssize_t uwb_dev_LQE_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t size)
{
	struct uwb_dev *uwb_dev = to_uwb_dev(dev);
	struct uwb_beca_e *bce = uwb_dev->bce;
	ssize_t result;

	/* Stats live in the beacon cache entry, guarded by its mutex */
	mutex_lock(&bce->mutex);
	result = stats_store(&uwb_dev->bce->lqe_stats, buf, size);
	mutex_unlock(&bce->mutex);
	return result;
}
static DEVICE_ATTR(LQE, S_IRUGO | S_IWUSR, uwb_dev_LQE_show, uwb_dev_LQE_store);
222
223static ssize_t uwb_dev_RSSI_show(struct device *dev,
224 struct device_attribute *attr, char *buf)
225{
226 struct uwb_dev *uwb_dev = to_uwb_dev(dev);
227 struct uwb_beca_e *bce = uwb_dev->bce;
228 size_t result;
229
230 mutex_lock(&bce->mutex);
231 result = stats_show(&uwb_dev->bce->rssi_stats, buf);
232 mutex_unlock(&bce->mutex);
233 return result;
234}
235
/* sysfs: update/reset received signal strength stats for this device */
static ssize_t uwb_dev_RSSI_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	struct uwb_dev *uwb_dev = to_uwb_dev(dev);
	struct uwb_beca_e *bce = uwb_dev->bce;
	ssize_t result;

	/* Stats live in the beacon cache entry, guarded by its mutex */
	mutex_lock(&bce->mutex);
	result = stats_store(&uwb_dev->bce->rssi_stats, buf, size);
	mutex_unlock(&bce->mutex);
	return result;
}
static DEVICE_ATTR(RSSI, S_IRUGO | S_IWUSR, uwb_dev_RSSI_show, uwb_dev_RSSI_store);
250
251
/* sysfs attributes installed on neighbor devices only (local radio
 * controllers skip them -- see __uwb_dev_sys_add()). */
static struct attribute *dev_attrs[] = {
	&dev_attr_EUI_48.attr,
	&dev_attr_DevAddr.attr,
	&dev_attr_BPST.attr,
	&dev_attr_IEs.attr,
	&dev_attr_LQE.attr,
	&dev_attr_RSSI.attr,
	NULL,
};

static struct attribute_group dev_attr_group = {
	.attrs = dev_attrs,
};

/* NULL-terminated list assigned to dev->groups at registration time */
static struct attribute_group *groups[] = {
	&dev_attr_group,
	NULL,
};
270
271/**
272 * Device SYSFS registration
273 *
274 *
275 */
static int __uwb_dev_sys_add(struct uwb_dev *uwb_dev, struct device *parent_dev)
{
	int result;
	struct device *dev;

	d_fnstart(4, NULL, "(uwb_dev %p parent_dev %p)\n", uwb_dev, parent_dev);
	BUG_ON(parent_dev == NULL);

	dev = &uwb_dev->dev;
	/* Device sysfs files are only useful for neighbor devices not
	   local radio controllers. */
	if (&uwb_dev->rc->uwb_dev != uwb_dev)
		dev->groups = groups;
	dev->parent = parent_dev;
	dev_set_drvdata(dev, uwb_dev);

	/* Completes the device_initialize() done in uwb_dev_init() */
	result = device_add(dev);
	d_fnend(4, NULL, "(uwb_dev %p parent_dev %p) = %d\n", uwb_dev, parent_dev, result);
	return result;
}
296
297
298static void __uwb_dev_sys_rm(struct uwb_dev *uwb_dev)
299{
300 d_fnstart(4, NULL, "(uwb_dev %p)\n", uwb_dev);
301 dev_set_drvdata(&uwb_dev->dev, NULL);
302 device_del(&uwb_dev->dev);
303 d_fnend(4, NULL, "(uwb_dev %p) = void\n", uwb_dev);
304}
305
306
307/**
308 * Register and initialize a new UWB device
309 *
310 * Did you call uwb_dev_init() on it?
311 *
312 * @parent_rc: is the parent radio controller who has the link to the
313 * device. When registering the UWB device that is a UWB
314 * Radio Controller, we point back to it.
315 *
316 * If registering the device that is part of a radio, caller has set
317 * rc->uwb_dev->dev. Otherwise it is to be left NULL--a new one will
318 * be allocated.
319 */
int uwb_dev_add(struct uwb_dev *uwb_dev, struct device *parent_dev,
		struct uwb_rc *parent_rc)
{
	int result;
	struct device *dev;

	BUG_ON(uwb_dev == NULL);
	BUG_ON(parent_dev == NULL);
	BUG_ON(parent_rc == NULL);

	mutex_lock(&uwb_dev->mutex);
	dev = &uwb_dev->dev;
	/* Link back to the radio controller that sees this device */
	uwb_dev->rc = parent_rc;
	result = __uwb_dev_sys_add(uwb_dev, parent_dev);
	if (result < 0)
		printk(KERN_ERR "UWB: unable to register dev %s with sysfs: %d\n",
		       dev_name(dev), result);
	mutex_unlock(&uwb_dev->mutex);
	return result;
}
340
341
/* Unregister a UWB device added with uwb_dev_add() */
void uwb_dev_rm(struct uwb_dev *uwb_dev)
{
	mutex_lock(&uwb_dev->mutex);
	__uwb_dev_sys_rm(uwb_dev);
	mutex_unlock(&uwb_dev->mutex);
}
348
349
/* uwb_dev_for_each() callback: take a reference if this child device is
 * the one we are looking for; non-zero return stops the iteration. */
static
int __uwb_dev_try_get(struct device *dev, void *__target_uwb_dev)
{
	struct uwb_dev *target = __target_uwb_dev;
	struct uwb_dev *candidate = to_uwb_dev(dev);

	if (candidate != target)
		return 0;
	uwb_dev_get(candidate);
	return 1;
}
361
362
363/**
364 * Given a UWB device descriptor, validate and refcount it
365 *
366 * @returns NULL if the device does not exist or is quiescing; the ptr to
367 * it otherwise.
368 */
369struct uwb_dev *uwb_dev_try_get(struct uwb_rc *rc, struct uwb_dev *uwb_dev)
370{
371 if (uwb_dev_for_each(rc, __uwb_dev_try_get, uwb_dev))
372 return uwb_dev;
373 else
374 return NULL;
375}
376EXPORT_SYMBOL_GPL(uwb_dev_try_get);
377
378
379/**
380 * Remove a device from the system [grunt for other functions]
381 */
int __uwb_dev_offair(struct uwb_dev *uwb_dev, struct uwb_rc *rc)
{
	struct device *dev = &uwb_dev->dev;
	char macbuf[UWB_ADDR_STRSIZE], devbuf[UWB_ADDR_STRSIZE];

	d_fnstart(3, NULL, "(dev %p [uwb_dev %p], uwb_rc %p)\n", dev, uwb_dev, rc);
	uwb_mac_addr_print(macbuf, sizeof(macbuf), &uwb_dev->mac_addr);
	uwb_dev_addr_print(devbuf, sizeof(devbuf), &uwb_dev->dev_addr);
	/* @rc may be NULL, hence the guarded accesses below */
	dev_info(dev, "uwb device (mac %s dev %s) disconnected from %s %s\n",
		 macbuf, devbuf,
		 rc ? rc->uwb_dev.dev.parent->bus->name : "n/a",
		 rc ? dev_name(rc->uwb_dev.dev.parent) : "");
	uwb_dev_rm(uwb_dev);
	uwb_dev_put(uwb_dev);	/* for the creation in _onair() */
	d_fnend(3, NULL, "(dev %p [uwb_dev %p], uwb_rc %p) = 0\n", dev, uwb_dev, rc);
	return 0;
}
399
400
401/**
402 * A device went off the air, clean up after it!
403 *
404 * This is called by the UWB Daemon (through the beacon purge function
405 * uwb_bcn_cache_purge) when it is detected that a device has been in
406 * radio silence for a while.
407 *
408 * If this device is actually a local radio controller we don't need
409 * to go through the offair process, as it is not registered as that.
410 *
411 * NOTE: uwb_bcn_cache.mutex is held!
412 */
413void uwbd_dev_offair(struct uwb_beca_e *bce)
414{
415 struct uwb_dev *uwb_dev;
416
417 uwb_dev = bce->uwb_dev;
418 if (uwb_dev) {
419 uwb_notify(uwb_dev->rc, uwb_dev, UWB_NOTIF_OFFAIR);
420 __uwb_dev_offair(uwb_dev, uwb_dev->rc);
421 }
422}
423
424
425/**
426 * A device went on the air, start it up!
427 *
428 * This is called by the UWB Daemon when it is detected that a device
429 * has popped up in the radio range of the radio controller.
430 *
431 * It will just create the freaking device, register the beacon and
432 * stuff and yatla, done.
433 *
434 *
435 * NOTE: uwb_beca.mutex is held, bce->mutex is held
436 */
437void uwbd_dev_onair(struct uwb_rc *rc, struct uwb_beca_e *bce)
438{
439 int result;
440 struct device *dev = &rc->uwb_dev.dev;
441 struct uwb_dev *uwb_dev;
442 char macbuf[UWB_ADDR_STRSIZE], devbuf[UWB_ADDR_STRSIZE];
443
444 uwb_mac_addr_print(macbuf, sizeof(macbuf), bce->mac_addr);
445 uwb_dev_addr_print(devbuf, sizeof(devbuf), &bce->dev_addr);
446 uwb_dev = kzalloc(sizeof(struct uwb_dev), GFP_KERNEL);
447 if (uwb_dev == NULL) {
448 dev_err(dev, "new device %s: Cannot allocate memory\n",
449 macbuf);
450 return;
451 }
452 uwb_dev_init(uwb_dev); /* This sets refcnt to one, we own it */
453 uwb_dev->mac_addr = *bce->mac_addr;
454 uwb_dev->dev_addr = bce->dev_addr;
455 dev_set_name(&uwb_dev->dev, macbuf);
456 result = uwb_dev_add(uwb_dev, &rc->uwb_dev.dev, rc);
457 if (result < 0) {
458 dev_err(dev, "new device %s: cannot instantiate device\n",
459 macbuf);
460 goto error_dev_add;
461 }
462 /* plug the beacon cache */
463 bce->uwb_dev = uwb_dev;
464 uwb_dev->bce = bce;
465 uwb_bce_get(bce); /* released in uwb_dev_sys_release() */
466 dev_info(dev, "uwb device (mac %s dev %s) connected to %s %s\n",
467 macbuf, devbuf, rc->uwb_dev.dev.parent->bus->name,
468 dev_name(rc->uwb_dev.dev.parent));
469 uwb_notify(rc, uwb_dev, UWB_NOTIF_ONAIR);
470 return;
471
472error_dev_add:
473 kfree(uwb_dev);
474 return;
475}
476
477/**
478 * Iterate over the list of UWB devices, calling a @function on each
479 *
480 * See docs for bus_for_each()....
481 *
482 * @rc: radio controller for the devices.
483 * @function: function to call.
484 * @priv: data to pass to @function.
485 * @returns: 0 if no invocation of function() returned a value
486 * different to zero. That value otherwise.
487 */
int uwb_dev_for_each(struct uwb_rc *rc, uwb_dev_for_each_f function, void *priv)
{
	/* Devices are children of the RC's device, so a plain child walk
	 * enumerates them all. */
	return device_for_each_child(&rc->uwb_dev.dev, priv, function);
}
EXPORT_SYMBOL_GPL(uwb_dev_for_each);
diff --git a/drivers/uwb/lc-rc.c b/drivers/uwb/lc-rc.c
new file mode 100644
index 000000000000..ee5772f00d42
--- /dev/null
+++ b/drivers/uwb/lc-rc.c
@@ -0,0 +1,495 @@
1/*
2 * Ultra Wide Band
3 * Life cycle of radio controllers
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * FIXME: docs
24 *
25 * A UWB radio controller is also a UWB device, so it embeds one...
26 *
27 * List of RCs comes from the 'struct class uwb_rc_class'.
28 */
29
30#include <linux/kernel.h>
31#include <linux/string.h>
32#include <linux/device.h>
33#include <linux/err.h>
34#include <linux/random.h>
35#include <linux/kdev_t.h>
36#include <linux/etherdevice.h>
37#include <linux/usb.h>
38
39#define D_LOCAL 1
40#include <linux/uwb/debug.h>
41#include "uwb-internal.h"
42
43static int uwb_rc_index_match(struct device *dev, void *data)
44{
45 int *index = data;
46 struct uwb_rc *rc = dev_get_drvdata(dev);
47
48 if (rc->index == *index)
49 return 1;
50 return 0;
51}
52
53static struct uwb_rc *uwb_rc_find_by_index(int index)
54{
55 struct device *dev;
56 struct uwb_rc *rc = NULL;
57
58 dev = class_find_device(&uwb_rc_class, NULL, &index, uwb_rc_index_match);
59 if (dev)
60 rc = dev_get_drvdata(dev);
61 return rc;
62}
63
/*
 * Pick the lowest free uwb%d index, starting from 0 and wrapping back
 * to 0 if the counter ever goes negative.  Loops until a free index
 * is found.
 */
static int uwb_rc_new_index(void)
{
	int candidate = 0;

	while (uwb_rc_find_by_index(candidate)) {
		candidate++;
		if (candidate < 0)	/* wrapped around */
			candidate = 0;
	}
	return candidate;
}
75
/**
 * Release the backing device of a uwb_rc that has been dynamically allocated.
 *
 * Runs from the driver core when the last reference to the embedded
 * uwb_dev's struct device is dropped: tears down the neh and IE state
 * created in uwb_rc_init() and frees the whole uwb_rc.
 */
static void uwb_rc_sys_release(struct device *dev)
{
	struct uwb_dev *uwb_dev = container_of(dev, struct uwb_dev, dev);
	struct uwb_rc *rc = container_of(uwb_dev, struct uwb_rc, uwb_dev);

	uwb_rc_neh_destroy(rc);
	uwb_rc_ie_release(rc);
	d_printf(1, dev, "freed uwb_rc %p\n", rc);
	kfree(rc);
}
89
90
/**
 * uwb_rc_init - initialize a (zeroed) radio controller structure
 * @rc: the radio controller to initialize
 *
 * Sets up the embedded uwb_dev, the notification/event handling state
 * and the per-RC subsystems (DRP availability, IEs, reservations,
 * PALs).  beaconing == -1 and UWB_SCAN_DISABLED are the "idle" states
 * checked later by uwb_rc_rm().
 */
void uwb_rc_init(struct uwb_rc *rc)
{
	struct uwb_dev *uwb_dev = &rc->uwb_dev;

	uwb_dev_init(uwb_dev);
	rc->uwb_dev.dev.class = &uwb_rc_class;
	rc->uwb_dev.dev.release = uwb_rc_sys_release;
	uwb_rc_neh_create(rc);
	rc->beaconing = -1;
	rc->scan_type = UWB_SCAN_DISABLED;
	INIT_LIST_HEAD(&rc->notifs_chain.list);
	mutex_init(&rc->notifs_chain.mutex);
	uwb_drp_avail_init(rc);
	uwb_rc_ie_init(rc);
	uwb_rsv_init(rc);
	uwb_rc_pal_init(rc);
}
EXPORT_SYMBOL_GPL(uwb_rc_init);
109
110
111struct uwb_rc *uwb_rc_alloc(void)
112{
113 struct uwb_rc *rc;
114 rc = kzalloc(sizeof(*rc), GFP_KERNEL);
115 if (rc == NULL)
116 return NULL;
117 uwb_rc_init(rc);
118 return rc;
119}
120EXPORT_SYMBOL_GPL(uwb_rc_alloc);
121
/* sysfs attributes exposed by every registered radio controller */
static struct attribute *rc_attrs[] = {
	&dev_attr_mac_address.attr,
	&dev_attr_scan.attr,
	&dev_attr_beacon.attr,
	NULL,
};

static struct attribute_group rc_attr_group = {
	.attrs = rc_attrs,
};
132
/*
 * Registration of sysfs specific stuff
 *
 * Publishes the rc_attr_group attributes on the RC's device; undone
 * by __uwb_rc_sys_rm().
 */
static int uwb_rc_sys_add(struct uwb_rc *rc)
{
	return sysfs_create_group(&rc->uwb_dev.dev.kobj, &rc_attr_group);
}
140
141
/* Undo uwb_rc_sys_add(): remove the RC's sysfs attribute group. */
static void __uwb_rc_sys_rm(struct uwb_rc *rc)
{
	sysfs_remove_group(&rc->uwb_dev.dev.kobj, &rc_attr_group);
}
146
/**
 * uwb_rc_mac_addr_setup - get an RC's EUI-48 address or set it
 * @rc: the radio controller.
 *
 * If the EUI-48 address is 00:00:00:00:00:00 or FF:FF:FF:FF:FF:FF
 * then a random locally administered EUI-48 is generated and set on
 * the device. The probability of address collisions is sufficiently
 * unlikely (1/2^40 = 9.1e-13) that they're not checked for.
 */
static
int uwb_rc_mac_addr_setup(struct uwb_rc *rc)
{
	int result;
	struct device *dev = &rc->uwb_dev.dev;
	struct uwb_dev *uwb_dev = &rc->uwb_dev;
	char devname[UWB_ADDR_STRSIZE];
	struct uwb_mac_addr addr;

	result = uwb_rc_mac_addr_get(rc, &addr);
	if (result < 0) {
		dev_err(dev, "cannot retrieve UWB EUI-48 address: %d\n", result);
		return result;
	}

	if (uwb_mac_addr_unset(&addr) || uwb_mac_addr_bcast(&addr)) {
		addr.data[0] = 0x02; /* locally administered and unicast */
		get_random_bytes(&addr.data[1], sizeof(addr.data)-1);

		result = uwb_rc_mac_addr_set(rc, &addr);
		if (result < 0) {
			uwb_mac_addr_print(devname, sizeof(devname), &addr);
			dev_err(dev, "cannot set EUI-48 address %s: %d\n",
				devname, result);
			return result;
		}
	}
	/* cache the (possibly freshly generated) address on the uwb_dev */
	uwb_dev->mac_addr = addr;
	return 0;
}
186
187
188
/*
 * uwb_rc_setup - bring the radio hardware to a known, usable state
 *
 * Resets the radio, programs the MAC (EUI-48) and device addresses,
 * then brings up the IE and reservation subsystems and registers the
 * RC with debugfs.  Errors unwind whatever was set up via the goto
 * ladder below.
 */
static int uwb_rc_setup(struct uwb_rc *rc)
{
	int result;
	struct device *dev = &rc->uwb_dev.dev;

	result = uwb_rc_reset(rc);
	if (result < 0) {
		dev_err(dev, "cannot reset UWB radio: %d\n", result);
		goto error;
	}
	result = uwb_rc_mac_addr_setup(rc);
	if (result < 0) {
		dev_err(dev, "cannot setup UWB MAC address: %d\n", result);
		goto error;
	}
	result = uwb_rc_dev_addr_assign(rc);
	if (result < 0) {
		dev_err(dev, "cannot assign UWB DevAddr: %d\n", result);
		goto error;
	}
	result = uwb_rc_ie_setup(rc);
	if (result < 0) {
		dev_err(dev, "cannot setup IE subsystem: %d\n", result);
		goto error_ie_setup;
	}
	result = uwb_rsv_setup(rc);
	if (result < 0) {
		dev_err(dev, "cannot setup reservation subsystem: %d\n", result);
		goto error_rsv_setup;
	}
	uwb_dbg_add_rc(rc);
	return 0;

error_rsv_setup:
	uwb_rc_ie_release(rc);
error_ie_setup:
error:
	return result;
}
228
229
/**
 * Register a new UWB radio controller
 *
 * Did you call uwb_rc_init() on your rc?
 *
 * We assume that this is being called with a > 0 refcount on
 * it [through ops->{get|put}_device(). We'll take our own, though.
 *
 * @parent_dev is our real device, the one that provides the actual UWB device
 *
 * Sequence: assign an index/name, start the hardware (rc->start),
 * configure it (uwb_rc_setup), register the embedded uwb_dev and the
 * sysfs attributes, then mark the RC ready.  Each error label unwinds
 * everything after (and including) the step that failed.
 */
int uwb_rc_add(struct uwb_rc *rc, struct device *parent_dev, void *priv)
{
	int result;
	struct device *dev;
	char macbuf[UWB_ADDR_STRSIZE], devbuf[UWB_ADDR_STRSIZE];

	rc->index = uwb_rc_new_index();

	dev = &rc->uwb_dev.dev;
	dev_set_name(dev, "uwb%d", rc->index);

	rc->priv = priv;

	result = rc->start(rc);
	if (result < 0)
		goto error_rc_start;

	result = uwb_rc_setup(rc);
	if (result < 0) {
		dev_err(dev, "cannot setup UWB radio controller: %d\n", result);
		goto error_rc_setup;
	}

	/* NOTE(review): -EADDRNOTAVAIL is deliberately tolerated here --
	 * registration continues even if a DevAddr could not be had;
	 * confirm against uwb_dev_add()'s contract. */
	result = uwb_dev_add(&rc->uwb_dev, parent_dev, rc);
	if (result < 0 && result != -EADDRNOTAVAIL)
		goto error_dev_add;

	result = uwb_rc_sys_add(rc);
	if (result < 0) {
		dev_err(parent_dev, "cannot register UWB radio controller "
			"dev attributes: %d\n", result);
		goto error_sys_add;
	}

	uwb_mac_addr_print(macbuf, sizeof(macbuf), &rc->uwb_dev.mac_addr);
	uwb_dev_addr_print(devbuf, sizeof(devbuf), &rc->uwb_dev.dev_addr);
	dev_info(dev,
		 "new uwb radio controller (mac %s dev %s) on %s %s\n",
		 macbuf, devbuf, parent_dev->bus->name, dev_name(parent_dev));
	/* ready == 1 gates uwb_rc_get()/__uwb_rc_try_get() lookups */
	rc->ready = 1;
	return 0;

error_sys_add:
	uwb_dev_rm(&rc->uwb_dev);
error_dev_add:
error_rc_setup:
	rc->stop(rc);
	uwbd_flush(rc);
error_rc_start:
	return result;
}
EXPORT_SYMBOL_GPL(uwb_rc_add);
292
293
294static int uwb_dev_offair_helper(struct device *dev, void *priv)
295{
296 struct uwb_dev *uwb_dev = to_uwb_dev(dev);
297
298 return __uwb_dev_offair(uwb_dev, uwb_dev->rc);
299}
300
/*
 * Remove a Radio Controller; stop beaconing/scanning, disconnect all children
 *
 * Ordering matters: mark not-ready first (so new lookups fail), quiesce
 * the radio, stop the hardware, then detach state and children.
 */
void uwb_rc_rm(struct uwb_rc *rc)
{
	rc->ready = 0;

	uwb_dbg_del_rc(rc);
	uwb_rsv_cleanup(rc);
	uwb_rc_ie_rm(rc, UWB_IDENTIFICATION_IE);
	if (rc->beaconing >= 0)
		uwb_rc_beacon(rc, -1, 0);
	if (rc->scan_type != UWB_SCAN_DISABLED)
		uwb_rc_scan(rc, rc->scanning, UWB_SCAN_DISABLED, 0);
	uwb_rc_reset(rc);

	rc->stop(rc);
	uwbd_flush(rc);	/* drain queued events before tearing down state */

	uwb_dev_lock(&rc->uwb_dev);
	rc->priv = NULL;
	rc->cmd = NULL;
	uwb_dev_unlock(&rc->uwb_dev);
	/* take every child device off-air under the beacon-cache mutex */
	mutex_lock(&uwb_beca.mutex);
	uwb_dev_for_each(rc, uwb_dev_offair_helper, NULL);
	__uwb_rc_sys_rm(rc);
	mutex_unlock(&uwb_beca.mutex);
	uwb_dev_rm(&rc->uwb_dev);
}
EXPORT_SYMBOL_GPL(uwb_rc_rm);
331
332static int find_rc_try_get(struct device *dev, void *data)
333{
334 struct uwb_rc *target_rc = data;
335 struct uwb_rc *rc = dev_get_drvdata(dev);
336
337 if (rc == NULL) {
338 WARN_ON(1);
339 return 0;
340 }
341 if (rc == target_rc) {
342 if (rc->ready == 0)
343 return 0;
344 else
345 return 1;
346 }
347 return 0;
348}
349
/**
 * Given a radio controller descriptor, validate and refcount it
 *
 * @returns NULL if the rc does not exist or is quiescing; the ptr to
 * it otherwise.
 *
 * Only RCs with ready == 1 match (see find_rc_try_get()); on success
 * a reference is taken with __uwb_rc_get().
 */
struct uwb_rc *__uwb_rc_try_get(struct uwb_rc *target_rc)
{
	struct device *dev;
	struct uwb_rc *rc = NULL;

	dev = class_find_device(&uwb_rc_class, NULL, target_rc,
				find_rc_try_get);
	if (dev) {
		rc = dev_get_drvdata(dev);
		__uwb_rc_get(rc);
	}
	return rc;
}
EXPORT_SYMBOL_GPL(__uwb_rc_try_get);
370
/*
 * RC get for external refcount acquirers...
 *
 * Increments the refcount of the device and it's backend modules
 *
 * Returns NULL (without taking a reference) if the RC is quiescing
 * (ready == 0); otherwise references the embedded uwb_dev.
 */
static inline struct uwb_rc *uwb_rc_get(struct uwb_rc *rc)
{
	if (rc->ready == 0)
		return NULL;
	uwb_dev_get(&rc->uwb_dev);
	return rc;
}
383
384static int find_rc_grandpa(struct device *dev, void *data)
385{
386 struct device *grandpa_dev = data;
387 struct uwb_rc *rc = dev_get_drvdata(dev);
388
389 if (rc->uwb_dev.dev.parent->parent == grandpa_dev) {
390 rc = uwb_rc_get(rc);
391 return 1;
392 }
393 return 0;
394}
395
/**
 * Locate and refcount a radio controller given a common grand-parent
 *
 * @grandpa_dev Pointer to the 'grandparent' device structure.
 * @returns NULL If the rc does not exist or is quiescing; the ptr to
 * it otherwise, properly referenced.
 *
 * The Radio Control interface (or the UWB Radio Controller) is always
 * an interface of a device. The parent is the interface, the
 * grandparent is the device that encapsulates the interface.
 *
 * There is no need to lock around as the "grandpa" would be
 * refcounted by the target, and to remove the references, the
 * uwb_rc_class->sem would have to be taken--we hold it, ergo we
 * should be safe.
 */
struct uwb_rc *uwb_rc_get_by_grandpa(const struct device *grandpa_dev)
{
	struct device *dev;
	struct uwb_rc *rc = NULL;

	/* find_rc_grandpa() takes the reference on match */
	dev = class_find_device(&uwb_rc_class, NULL, (void *)grandpa_dev,
				find_rc_grandpa);
	if (dev)
		rc = dev_get_drvdata(dev);
	return rc;
}
EXPORT_SYMBOL_GPL(uwb_rc_get_by_grandpa);
424
/**
 * find_rc_dev - match callback for uwb_rc_get_by_dev() below
 * @dev: candidate RC class device
 * @data: the struct uwb_dev_addr being searched for
 *
 * On a DevAddr match, takes a reference on the radio controller (via
 * uwb_rc_get()) and returns 1 to stop the iteration.
 */
static int find_rc_dev(struct device *dev, void *data)
{
	struct uwb_dev_addr *addr = data;
	struct uwb_rc *rc = dev_get_drvdata(dev);

	if (rc == NULL) {
		WARN_ON(1);	/* RC class devices always carry drvdata */
		return 0;
	}
	if (!uwb_dev_addr_cmp(&rc->uwb_dev.dev_addr, addr)) {
		rc = uwb_rc_get(rc);
		return 1;
	}
	return 0;
}
445
/**
 * uwb_rc_get_by_dev - find a radio controller by device address
 * @addr: the DevAddr to look for
 *
 * @returns the pointer to the radio controller, properly referenced
 * (the reference is taken by find_rc_dev()), or NULL if not found.
 */
struct uwb_rc *uwb_rc_get_by_dev(const struct uwb_dev_addr *addr)
{
	struct device *dev;
	struct uwb_rc *rc = NULL;

	dev = class_find_device(&uwb_rc_class, NULL, (void *)addr,
				find_rc_dev);
	if (dev)
		rc = dev_get_drvdata(dev);

	return rc;
}
EXPORT_SYMBOL_GPL(uwb_rc_get_by_dev);
459
/**
 * Drop a reference on a radio controller
 *
 * This is the version that should be done by entities external to the
 * UWB Radio Control stack (ie: clients of the API).
 *
 * Pairs with uwb_rc_get_by_dev()/uwb_rc_get_by_grandpa().
 */
void uwb_rc_put(struct uwb_rc *rc)
{
	__uwb_rc_put(rc);
}
EXPORT_SYMBOL_GPL(uwb_rc_put);
471
/*
 * Print the RC's current set of IEs into a buffer (sysfs-style).
 *
 * Fetches the IE list from the hardware (uwb_rc_get_ie() allocates
 * ie_info, which we must kfree), then hex-dumps each IE into @buf,
 * writing at most @size bytes.  Returns the number of bytes written
 * or a negative errno.
 *
 * NOTE(review): 'result - sizeof(*ie_info)' assumes uwb_rc_get_ie()
 * never returns a positive value smaller than sizeof(*ie_info) --
 * confirm, otherwise the length would underflow.
 */
ssize_t uwb_rc_print_IEs(struct uwb_rc *uwb_rc, char *buf, size_t size)
{
	ssize_t result;
	struct uwb_rc_evt_get_ie *ie_info;
	struct uwb_buf_ctx ctx;

	result = uwb_rc_get_ie(uwb_rc, &ie_info);
	if (result < 0)
		goto error_get_ie;
	ctx.buf = buf;
	ctx.size = size;
	ctx.bytes = 0;
	uwb_ie_for_each(&uwb_rc->uwb_dev, uwb_ie_dump_hex, &ctx,
			ie_info->IEData, result - sizeof(*ie_info));
	result = ctx.bytes;
	kfree(ie_info);
error_get_ie:
	return result;
}
495
diff --git a/drivers/uwb/neh.c b/drivers/uwb/neh.c
new file mode 100644
index 000000000000..9b4eb64327ac
--- /dev/null
+++ b/drivers/uwb/neh.c
@@ -0,0 +1,616 @@
1/*
2 * WUSB Wire Adapter: Radio Control Interface (WUSB[8])
3 * Notification and Event Handling
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * The RC interface of the Host Wire Adapter (USB dongle) or WHCI PCI
24 * card delivers a stream of notifications and events to the
25 * notification end event endpoint or area. This code takes care of
26 * getting a buffer with that data, breaking it up in separate
27 * notifications and events and then deliver those.
28 *
29 * Events are answers to commands and they carry a context ID that
30 * associates them to the command. Notifications are that,
31 * notifications, they come out of the blue and have a context ID of
32 * zero. Think of the context ID kind of like a handler. The
33 * uwb_rc_neh_* code deals with managing context IDs.
34 *
35 * This is why you require a handle to operate on a UWB host. When you
36 * open a handle a context ID is assigned to you.
37 *
38 * So, as it is done is:
39 *
40 * 1. Add an event handler [uwb_rc_neh_add()] (assigns a ctx id)
41 * 2. Issue command [rc->cmd(rc, ...)]
42 * 3. Arm the timeout timer [uwb_rc_neh_arm()]
43 * 4, Release the reference to the neh [uwb_rc_neh_put()]
44 * 5. Wait for the callback
45 * 6. Command result (RCEB) is passed to the callback
46 *
47 * If (2) fails, you should remove the handle [uwb_rc_neh_rm()]
48 * instead of arming the timer.
49 *
50 * Handles are for using in *serialized* code, single thread.
51 *
52 * When the notification/event comes, the IRQ handler/endpoint
53 * callback passes the data read to uwb_rc_neh_grok() which will break
54 * it up in a discrete series of events, look up who is listening for
55 * them and execute the pertinent callbacks.
56 *
57 * If the reader detects an error while reading the data stream, call
58 * uwb_rc_neh_error().
59 *
60 * CONSTRAINTS/ASSUMPTIONS:
61 *
 * - Most notifications/events are small (less than .5k), copying
63 * around is ok.
64 *
65 * - Notifications/events are ALWAYS smaller than PAGE_SIZE
66 *
67 * - Notifications/events always come in a single piece (ie: a buffer
68 * will always contain entire notifications/events).
69 *
70 * - we cannot know in advance how long each event is (because they
71 * lack a length field in their header--smart move by the standards
72 * body, btw). So we need a facility to get the event size given the
73 * header. This is what the EST code does (notif/Event Size
74 * Tables), check nest.c--as well, you can associate the size to
75 * the handle [w/ neh->extra_size()].
76 *
77 * - Most notifications/events are fixed size; only a few are variable
78 * size (NEST takes care of that).
79 *
80 * - Listeners of events expect them, so they usually provide a
81 * buffer, as they know the size. Listeners to notifications don't,
82 * so we allocate their buffers dynamically.
83 */
84#include <linux/kernel.h>
85#include <linux/timer.h>
86#include <linux/err.h>
87
88#include "uwb-internal.h"
89#define D_LOCAL 0
90#include <linux/uwb/debug.h>
91
/*
 * UWB Radio Controller Notification/Event Handle
 *
 * Represents an entity waiting for an event coming from the UWB Radio
 * Controller with a given context id (context) and type (evt_type and
 * evt). On reception of the notification/event, the callback (cb) is
 * called with the event.
 *
 * If the timer expires before the event is received, the callback is
 * called with -ETIMEDOUT as the event size.
 */
struct uwb_rc_neh {
	struct kref kref;		/* released by uwb_rc_neh_release() */

	struct uwb_rc *rc;		/* RC this handle waits on */
	u8 evt_type;			/* expected RCEB bEventType */
	__le16 evt;			/* expected RCEB wEvent */
	u8 context;			/* context id; 0 == not enlisted */
	uwb_rc_cmd_cb_f cb;		/* invoked on event, error or timeout */
	void *arg;			/* opaque argument for @cb */

	struct timer_list timer;	/* command timeout (uwb_rc_neh_arm) */
	struct list_head list_node;	/* on rc->neh_list, under rc->neh_lock */
};
116
117static void uwb_rc_neh_timer(unsigned long arg);
118
/* kref release: called when the last reference to a neh is dropped. */
static void uwb_rc_neh_release(struct kref *kref)
{
	struct uwb_rc_neh *neh = container_of(kref, struct uwb_rc_neh, kref);

	kfree(neh);
}
125
/* Take an additional reference on @neh; paired with uwb_rc_neh_put(). */
static void uwb_rc_neh_get(struct uwb_rc_neh *neh)
{
	kref_get(&neh->kref);
}
130
/**
 * uwb_rc_neh_put - release reference to a neh
 * @neh: the neh
 *
 * Frees the neh (via uwb_rc_neh_release()) when the last reference
 * goes away.
 */
void uwb_rc_neh_put(struct uwb_rc_neh *neh)
{
	kref_put(&neh->kref, uwb_rc_neh_release);
}
139
140
/**
 * Assigns @neh a context id from @rc's pool
 *
 * @rc: UWB Radio Controller descriptor; @rc->neh_lock taken
 * @neh: Notification/Event Handle
 * @returns 0 if context id was assigned ok; < 0 errno on error (if
 * all the context IDs are taken).
 *
 * (assumes @wa is locked).
 *
 * NOTE: WUSB spec reserves context ids 0x00 for notifications and
 * 0xff is invalid, so they must not be used. Initialization
 * fills up those two in the bitmap so they are not allocated.
 *
 * We spread the allocation around to reduce the possibility of two
 * consecutive opened @neh's getting the same context ID assigned (to
 * avoid surprises with late events that timed out long time ago). So
 * first we search from where @rc->ctx_roll is, if not found, we
 * search from zero.
 */
static
int __uwb_rc_ctx_get(struct uwb_rc *rc, struct uwb_rc_neh *neh)
{
	int result;
	/* NOTE(review): assumes ctx_roll++ stays a sane offset for
	 * find_next_zero_bit (offset >= size just yields size) --
	 * confirm ctx_roll's type wraps or is bounded elsewhere. */
	result = find_next_zero_bit(rc->ctx_bm, UWB_RC_CTX_MAX,
				    rc->ctx_roll++);
	if (result < UWB_RC_CTX_MAX)
		goto found;
	result = find_first_zero_bit(rc->ctx_bm, UWB_RC_CTX_MAX);
	if (result < UWB_RC_CTX_MAX)
		goto found;
	return -ENFILE;
found:
	set_bit(result, rc->ctx_bm);
	neh->context = result;
	return 0;
}
178
179
/** Releases @neh's context ID back to @rc (@rc->neh_lock is locked). */
static
void __uwb_rc_ctx_put(struct uwb_rc *rc, struct uwb_rc_neh *neh)
{
	struct device *dev = &rc->uwb_dev.dev;
	/* context 0 is reserved/never assigned: nothing to release */
	if (neh->context == 0)
		return;
	if (test_bit(neh->context, rc->ctx_bm) == 0) {
		dev_err(dev, "context %u not set in bitmap\n",
			neh->context);
		WARN_ON(1);
	}
	clear_bit(neh->context, rc->ctx_bm);
	neh->context = 0;
}
195
/**
 * uwb_rc_neh_add - add a neh for a radio controller command
 * @rc: the radio controller
 * @cmd: the radio controller command
 * @expected_type: the type of the expected response event
 * @expected_event: the expected event ID
 * @cb: callback for when the event is received
 * @arg: argument for the callback
 *
 * Creates a neh and adds it to the list of those waiting for an
 * event. A context ID will be assigned to the command.
 *
 * On success the neh holds two references: one for the caller (drop
 * with uwb_rc_neh_put() after arming) and one owned by rc->neh_list
 * (dropped when the event arrives, times out or the neh is removed).
 * Returns an ERR_PTR on failure.
 */
struct uwb_rc_neh *uwb_rc_neh_add(struct uwb_rc *rc, struct uwb_rccb *cmd,
				  u8 expected_type, u16 expected_event,
				  uwb_rc_cmd_cb_f cb, void *arg)
{
	int result;
	unsigned long flags;
	struct device *dev = &rc->uwb_dev.dev;
	struct uwb_rc_neh *neh;

	neh = kzalloc(sizeof(*neh), GFP_KERNEL);
	if (neh == NULL) {
		result = -ENOMEM;
		goto error_kzalloc;
	}

	kref_init(&neh->kref);
	INIT_LIST_HEAD(&neh->list_node);
	init_timer(&neh->timer);
	neh->timer.function = uwb_rc_neh_timer;
	neh->timer.data = (unsigned long)neh;

	neh->rc = rc;
	neh->evt_type = expected_type;
	neh->evt = cpu_to_le16(expected_event);
	neh->cb = cb;
	neh->arg = arg;

	spin_lock_irqsave(&rc->neh_lock, flags);
	result = __uwb_rc_ctx_get(rc, neh);
	if (result >= 0) {
		/* stamp the command with the context the reply will carry */
		cmd->bCommandContext = neh->context;
		list_add_tail(&neh->list_node, &rc->neh_list);
		uwb_rc_neh_get(neh);	/* the list's reference */
	}
	spin_unlock_irqrestore(&rc->neh_lock, flags);
	if (result < 0)
		goto error_ctx_get;

	return neh;

error_ctx_get:
	kfree(neh);
error_kzalloc:
	dev_err(dev, "cannot open handle to radio controller: %d\n", result);
	return ERR_PTR(result);
}
254
/*
 * Unlink @neh: stop its timer, free its context id and remove it from
 * rc->neh_list.  Caller holds rc->neh_lock and must drop the list's
 * reference with uwb_rc_neh_put() afterwards.
 */
static void __uwb_rc_neh_rm(struct uwb_rc *rc, struct uwb_rc_neh *neh)
{
	del_timer(&neh->timer);
	__uwb_rc_ctx_put(rc, neh);
	list_del(&neh->list_node);
}
261
/**
 * uwb_rc_neh_rm - remove a neh.
 * @rc: the radio controller
 * @neh: the neh to remove
 *
 * Remove an active neh immediately instead of waiting for the event
 * (or a time out).
 *
 * Drops the list's reference; the callback is NOT invoked.
 */
void uwb_rc_neh_rm(struct uwb_rc *rc, struct uwb_rc_neh *neh)
{
	unsigned long flags;

	spin_lock_irqsave(&rc->neh_lock, flags);
	__uwb_rc_neh_rm(rc, neh);
	spin_unlock_irqrestore(&rc->neh_lock, flags);

	uwb_rc_neh_put(neh);
}
280
/**
 * uwb_rc_neh_arm - arm an event handler timeout timer
 *
 * @rc: UWB Radio Controller
 * @neh: Notification/event handler for @rc
 *
 * The timer is only armed if the neh is active.
 * (neh->context != 0 means it is still enlisted; context is zeroed by
 * __uwb_rc_ctx_put() when the neh is removed.)
 */
void uwb_rc_neh_arm(struct uwb_rc *rc, struct uwb_rc_neh *neh)
{
	unsigned long flags;

	spin_lock_irqsave(&rc->neh_lock, flags);
	if (neh->context)
		mod_timer(&neh->timer,
			  jiffies + msecs_to_jiffies(UWB_RC_CMD_TIMEOUT_MS));
	spin_unlock_irqrestore(&rc->neh_lock, flags);
}
299
/* Run @neh's callback and drop the (list's) reference it consumed. */
static void uwb_rc_neh_cb(struct uwb_rc_neh *neh, struct uwb_rceb *rceb, size_t size)
{
	(*neh->cb)(neh->rc, neh->arg, rceb, size);
	uwb_rc_neh_put(neh);
}
305
306static bool uwb_rc_neh_match(struct uwb_rc_neh *neh, const struct uwb_rceb *rceb)
307{
308 return neh->evt_type == rceb->bEventType
309 && neh->evt == rceb->wEvent
310 && neh->context == rceb->bEventContext;
311}
312
/**
 * Find the neh waiting for a given RC Radio Control Event
 *
 * @rc: UWB Radio Controller
 * @rceb: Pointer to the RCEB of the received event
 *
 * The matching neh (same event type, id and context, see
 * uwb_rc_neh_match()) is unlinked under rc->neh_lock -- its timer is
 * stopped and its context released -- before being returned, so the
 * caller inherits the list's reference and must pass it on (e.g. to
 * uwb_rc_neh_cb(), which puts it).  Returns NULL if nobody is waiting
 * for this event.
 */
static
struct uwb_rc_neh *uwb_rc_neh_lookup(struct uwb_rc *rc,
				     const struct uwb_rceb *rceb)
{
	struct uwb_rc_neh *neh = NULL, *h;
	unsigned long flags;

	spin_lock_irqsave(&rc->neh_lock, flags);

	list_for_each_entry(h, &rc->neh_list, list_node) {
		if (uwb_rc_neh_match(h, rceb)) {
			neh = h;
			break;
		}
	}

	if (neh)
		__uwb_rc_neh_rm(rc, neh);

	spin_unlock_irqrestore(&rc->neh_lock, flags);

	return neh;
}
350
351
352/**
353 * Process notifications coming from the radio control interface
354 *
355 * @rc: UWB Radio Control Interface descriptor
356 * @neh: Notification/Event Handler @neh->ptr points to
357 * @uwb_evt->buffer.
358 *
359 * This function is called by the event/notif handling subsystem when
360 * notifications arrive (hwarc_probe() arms a notification/event handle
361 * that calls back this function for every received notification; this
362 * function then will rearm itself).
363 *
364 * Notification data buffers are dynamically allocated by the NEH
365 * handling code in neh.c [uwb_rc_neh_lookup()]. What is actually
366 * allocated is space to contain the notification data.
367 *
368 * Buffers are prefixed with a Radio Control Event Block (RCEB) as
369 * defined by the WUSB Wired-Adapter Radio Control interface. We
370 * just use it for the notification code.
371 *
372 * On each case statement we just transcode endianess of the different
373 * fields. We declare a pointer to a RCI definition of an event, and
374 * then to a UWB definition of the same event (which are the same,
375 * remember). Event if we use different pointers
376 */
377static
378void uwb_rc_notif(struct uwb_rc *rc, struct uwb_rceb *rceb, ssize_t size)
379{
380 struct device *dev = &rc->uwb_dev.dev;
381 struct uwb_event *uwb_evt;
382
383 if (size == -ESHUTDOWN)
384 return;
385 if (size < 0) {
386 dev_err(dev, "ignoring event with error code %zu\n",
387 size);
388 return;
389 }
390
391 uwb_evt = kzalloc(sizeof(*uwb_evt), GFP_ATOMIC);
392 if (unlikely(uwb_evt == NULL)) {
393 dev_err(dev, "no memory to queue event 0x%02x/%04x/%02x\n",
394 rceb->bEventType, le16_to_cpu(rceb->wEvent),
395 rceb->bEventContext);
396 return;
397 }
398 uwb_evt->rc = __uwb_rc_get(rc); /* will be put by uwbd's uwbd_event_handle() */
399 uwb_evt->ts_jiffies = jiffies;
400 uwb_evt->type = UWB_EVT_TYPE_NOTIF;
401 uwb_evt->notif.size = size;
402 uwb_evt->notif.rceb = rceb;
403
404 switch (le16_to_cpu(rceb->wEvent)) {
405 /* Trap some vendor specific events
406 *
407 * FIXME: move this to handling in ptc-est, where we
408 * register a NULL event handler for these two guys
409 * using the Intel IDs.
410 */
411 case 0x0103:
412 dev_info(dev, "FIXME: DEVICE ADD\n");
413 return;
414 case 0x0104:
415 dev_info(dev, "FIXME: DEVICE RM\n");
416 return;
417 default:
418 break;
419 }
420
421 uwbd_event_queue(uwb_evt);
422}
423
/*
 * Dispatch one complete event/notification block.
 *
 * Context 0 means notification: copy it into its own buffer and hand
 * it to uwb_rc_notif() (which takes ownership of the copy).  Any
 * other context is an answer to a command: find the waiting neh via
 * uwb_rc_neh_lookup() and run its callback; events nobody waits for
 * are dropped with a warning.
 */
static void uwb_rc_neh_grok_event(struct uwb_rc *rc, struct uwb_rceb *rceb, size_t size)
{
	struct device *dev = &rc->uwb_dev.dev;
	struct uwb_rc_neh *neh;
	struct uwb_rceb *notif;

	if (rceb->bEventContext == 0) {
		notif = kmalloc(size, GFP_ATOMIC);
		if (notif) {
			memcpy(notif, rceb, size);
			uwb_rc_notif(rc, notif, size);
		} else
			dev_err(dev, "event 0x%02x/%04x/%02x (%zu bytes): no memory\n",
				rceb->bEventType, le16_to_cpu(rceb->wEvent),
				rceb->bEventContext, size);
	} else {
		neh = uwb_rc_neh_lookup(rc, rceb);
		if (neh)
			uwb_rc_neh_cb(neh, rceb, size);
		else
			dev_warn(dev, "event 0x%02x/%04x/%02x (%zu bytes): nobody cared\n",
				 rceb->bEventType, le16_to_cpu(rceb->wEvent),
				 rceb->bEventContext, size);
	}
}
449
/**
 * Given a buffer with one or more UWB RC events/notifications, break
 * them up and dispatch them.
 *
 * @rc: UWB Radio Controller
 * @buf: Buffer with the stream of notifications/events
 * @buf_size: Amount of data in the buffer
 *
 * Note each notification/event starts always with a 'struct
 * uwb_rceb', so the minimum size is 4 bytes.
 *
 * The device may pass us events formatted differently than expected.
 * These are first filtered, potentially creating a new event in a new
 * memory location. If a new event is created by the filter it is also
 * freed here.
 *
 * For each notif/event, tries to guess the size looking at the EST
 * tables, then looks for a neh that is waiting for that event and if
 * found, copies the payload to the neh's buffer and calls it back. If
 * not, the data is ignored.
 *
 * Note that if we can't find a size description in the EST tables, we
 * still might find a size in the 'neh' handle in uwb_rc_neh_lookup().
 *
 * Assumptions:
 *
 *   @rc->neh_lock is NOT taken
 *
 * We keep track of various sizes here:
 * size:      contains the size of the buffer that is processed for the
 *            incoming event. this buffer may contain events that are not
 *            formatted as WHCI.
 * real_size: the actual space taken by this event in the buffer.
 *            We need to keep track of the real size of an event to be able to
 *            advance the buffer correctly.
 * event_size: the size of the event as expected by the core layer
 *            [OR] the size of the event after filtering. if the filtering
 *            created a new event in a new memory location then this is
 *            effectively the size of a new event buffer
 */
void uwb_rc_neh_grok(struct uwb_rc *rc, void *buf, size_t buf_size)
{
	struct device *dev = &rc->uwb_dev.dev;
	void *itr;
	struct uwb_rceb *rceb;
	size_t size, real_size, event_size;
	int needtofree;

	d_fnstart(3, dev, "(rc %p buf %p %zu buf_size)\n", rc, buf, buf_size);
	d_printf(2, dev, "groking event block: %zu bytes\n", buf_size);
	itr = buf;
	size = buf_size;
	while (size > 0) {
		if (size < sizeof(*rceb)) {
			dev_err(dev, "not enough data in event buffer to "
				"process incoming events (%zu left, minimum is "
				"%zu)\n", size, sizeof(*rceb));
			break;
		}

		rceb = itr;
		if (rc->filter_event) {
			/* filter_event may rewrite rceb to point at a new
			 * buffer (needtofree == 1) or decline with -ENOANO */
			needtofree = rc->filter_event(rc, &rceb, size,
						      &real_size, &event_size);
			if (needtofree < 0 && needtofree != -ENOANO) {
				dev_err(dev, "BUG: Unable to filter event "
					"(0x%02x/%04x/%02x) from "
					"device. \n", rceb->bEventType,
					le16_to_cpu(rceb->wEvent),
					rceb->bEventContext);
				break;
			}
		} else
			needtofree = -ENOANO;
		/* do real processing if there was no filtering or the
		 * filtering didn't act */
		if (needtofree == -ENOANO) {
			ssize_t ret = uwb_est_find_size(rc, rceb, size);
			if (ret < 0)
				break;
			if (ret > size) {
				dev_err(dev, "BUG: hw sent incomplete event "
					"0x%02x/%04x/%02x (%zd bytes), only got "
					"%zu bytes. We don't handle that.\n",
					rceb->bEventType, le16_to_cpu(rceb->wEvent),
					rceb->bEventContext, ret, size);
				break;
			}
			real_size = event_size = ret;
		}
		uwb_rc_neh_grok_event(rc, rceb, event_size);

		if (needtofree == 1)
			kfree(rceb);

		itr += real_size;
		size -= real_size;
		d_printf(2, dev, "consumed %zd bytes, %zu left\n",
			 event_size, size);
	}
	d_fnend(3, dev, "(rc %p buf %p %zu buf_size) = void\n", rc, buf, buf_size);
}
EXPORT_SYMBOL_GPL(uwb_rc_neh_grok);
553
554
/**
 * The entity that reads from the device notification/event channel has
 * detected an error.
 *
 * @rc: UWB Radio Controller
 * @error: Errno error code (must be negative)
 *
 * Every outstanding neh is unlinked and its callback invoked with a
 * NULL buffer and @error as the size, so waiters are not stranded.
 */
void uwb_rc_neh_error(struct uwb_rc *rc, int error)
{
	struct uwb_rc_neh *neh, *next;
	unsigned long flags;

	BUG_ON(error >= 0);
	spin_lock_irqsave(&rc->neh_lock, flags);
	list_for_each_entry_safe(neh, next, &rc->neh_list, list_node) {
		__uwb_rc_neh_rm(rc, neh);
		uwb_rc_neh_cb(neh, NULL, error);
	}
	spin_unlock_irqrestore(&rc->neh_lock, flags);
}
EXPORT_SYMBOL_GPL(uwb_rc_neh_error);
577
578
/*
 * Command timeout: unlink the neh and report -ETIMEDOUT to its
 * callback.
 *
 * NOTE(review): if the event arrives and __uwb_rc_neh_rm() runs while
 * this handler is already executing, the list_del() here would act on
 * an already-unlinked node -- confirm the del_timer() in
 * __uwb_rc_neh_rm() is sufficient to exclude that race.
 */
static void uwb_rc_neh_timer(unsigned long arg)
{
	struct uwb_rc_neh *neh = (struct uwb_rc_neh *)arg;
	struct uwb_rc *rc = neh->rc;
	unsigned long flags;

	spin_lock_irqsave(&rc->neh_lock, flags);
	__uwb_rc_neh_rm(rc, neh);
	spin_unlock_irqrestore(&rc->neh_lock, flags);

	uwb_rc_neh_cb(neh, NULL, -ETIMEDOUT);
}
591
/** Initializes the @rc's neh subsystem
 *
 * Marks context ids 0 and 0xff as permanently taken so
 * __uwb_rc_ctx_get() never hands them out.
 */
void uwb_rc_neh_create(struct uwb_rc *rc)
{
	spin_lock_init(&rc->neh_lock);
	INIT_LIST_HEAD(&rc->neh_list);
	set_bit(0, rc->ctx_bm);		/* 0 is reserved (see [WUSB] table 8-65) */
	set_bit(0xff, rc->ctx_bm);	/* and 0xff is invalid */
	rc->ctx_roll = 1;
}
602
603
/** Releases the @rc's neh subsystem
 *
 * Unlinks every remaining neh and drops the list's reference on each;
 * callbacks are NOT invoked (compare uwb_rc_neh_error()).
 */
void uwb_rc_neh_destroy(struct uwb_rc *rc)
{
	unsigned long flags;
	struct uwb_rc_neh *neh, *next;

	spin_lock_irqsave(&rc->neh_lock, flags);
	list_for_each_entry_safe(neh, next, &rc->neh_list, list_node) {
		__uwb_rc_neh_rm(rc, neh);
		uwb_rc_neh_put(neh);
	}
	spin_unlock_irqrestore(&rc->neh_lock, flags);
}
diff --git a/drivers/uwb/pal.c b/drivers/uwb/pal.c
new file mode 100644
index 000000000000..1afb38eacb9a
--- /dev/null
+++ b/drivers/uwb/pal.c
@@ -0,0 +1,91 @@
1/*
2 * UWB PAL support.
3 *
4 * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License version
8 * 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18#include <linux/kernel.h>
19#include <linux/uwb.h>
20
21#include "uwb-internal.h"
22
23/**
24 * uwb_pal_init - initialize a UWB PAL
25 * @pal: the PAL to initialize
26 */
27void uwb_pal_init(struct uwb_pal *pal)
28{
29 INIT_LIST_HEAD(&pal->node);
30}
31EXPORT_SYMBOL_GPL(uwb_pal_init);
32
33/**
34 * uwb_pal_register - register a UWB PAL
35 * @rc: the radio controller the PAL will be using
36 * @pal: the PAL
37 *
38 * The PAL must be initialized with uwb_pal_init().
39 */
40int uwb_pal_register(struct uwb_rc *rc, struct uwb_pal *pal)
41{
42 int ret;
43
44 if (pal->device) {
45 ret = sysfs_create_link(&pal->device->kobj,
46 &rc->uwb_dev.dev.kobj, "uwb_rc");
47 if (ret < 0)
48 return ret;
49 ret = sysfs_create_link(&rc->uwb_dev.dev.kobj,
50 &pal->device->kobj, pal->name);
51 if (ret < 0) {
52 sysfs_remove_link(&pal->device->kobj, "uwb_rc");
53 return ret;
54 }
55 }
56
57 spin_lock(&rc->pal_lock);
58 list_add(&pal->node, &rc->pals);
59 spin_unlock(&rc->pal_lock);
60
61 return 0;
62}
63EXPORT_SYMBOL_GPL(uwb_pal_register);
64
/**
 * uwb_pal_unregister - unregister a UWB PAL
 * @rc: the radio controller the PAL was using
 * @pal: the PAL
 *
 * Removes the PAL from the radio controller's list and deletes the
 * sysfs cross-links created by uwb_pal_register() (if the PAL has an
 * associated device).
 */
void uwb_pal_unregister(struct uwb_rc *rc, struct uwb_pal *pal)
{
	spin_lock(&rc->pal_lock);
	list_del(&pal->node);
	spin_unlock(&rc->pal_lock);

	if (pal->device) {
		sysfs_remove_link(&rc->uwb_dev.dev.kobj, pal->name);
		sysfs_remove_link(&pal->device->kobj, "uwb_rc");
	}
}
EXPORT_SYMBOL_GPL(uwb_pal_unregister);
82
83/**
84 * uwb_rc_pal_init - initialize the PAL related parts of a radio controller
85 * @rc: the radio controller
86 */
87void uwb_rc_pal_init(struct uwb_rc *rc)
88{
89 spin_lock_init(&rc->pal_lock);
90 INIT_LIST_HEAD(&rc->pals);
91}
diff --git a/drivers/uwb/reset.c b/drivers/uwb/reset.c
new file mode 100644
index 000000000000..8de856fa7958
--- /dev/null
+++ b/drivers/uwb/reset.c
@@ -0,0 +1,362 @@
1/*
2 * Ultra Wide Band
3 * UWB basic command support and radio reset
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * FIXME:
24 *
25 * - docs
26 *
27 * - Now we are serializing (using the uwb_dev->mutex) the command
28 * execution; it should be parallelized as much as possible some
29 * day.
30 */
31#include <linux/kernel.h>
32#include <linux/err.h>
33
34#include "uwb-internal.h"
35#define D_LOCAL 0
36#include <linux/uwb/debug.h>
37
/**
 * Command result codes (WUSB1.0[T8-69])
 *
 * Human-readable descriptions indexed by result code; codes outside
 * this table are handled by uwb_rc_strerror().
 */
static
const char *__strerror[] = {
	"success",
	"failure",
	"hardware failure",
	"no more slots",
	"beacon is too large",
	"invalid parameter",
	"unsupported power level",
	"time out (wa) or invalid ie data (whci)",
	"beacon size exceeded",
	"cancelled",
	"invalid state",
	"invalid size",
	"ack not received",	/* fixed typo: was "recieved" */
	"no more asie notification",
};
58
59
60/** Return a string matching the given error code */
61const char *uwb_rc_strerror(unsigned code)
62{
63 if (code == 255)
64 return "time out";
65 if (code >= ARRAY_SIZE(__strerror))
66 return "unknown error";
67 return __strerror[code];
68}
69
70int uwb_rc_cmd_async(struct uwb_rc *rc, const char *cmd_name,
71 struct uwb_rccb *cmd, size_t cmd_size,
72 u8 expected_type, u16 expected_event,
73 uwb_rc_cmd_cb_f cb, void *arg)
74{
75 struct device *dev = &rc->uwb_dev.dev;
76 struct uwb_rc_neh *neh;
77 int needtofree = 0;
78 int result;
79
80 uwb_dev_lock(&rc->uwb_dev); /* Protect against rc->priv being removed */
81 if (rc->priv == NULL) {
82 uwb_dev_unlock(&rc->uwb_dev);
83 return -ESHUTDOWN;
84 }
85
86 if (rc->filter_cmd) {
87 needtofree = rc->filter_cmd(rc, &cmd, &cmd_size);
88 if (needtofree < 0 && needtofree != -ENOANO) {
89 dev_err(dev, "%s: filter error: %d\n",
90 cmd_name, needtofree);
91 uwb_dev_unlock(&rc->uwb_dev);
92 return needtofree;
93 }
94 }
95
96 neh = uwb_rc_neh_add(rc, cmd, expected_type, expected_event, cb, arg);
97 if (IS_ERR(neh)) {
98 result = PTR_ERR(neh);
99 goto out;
100 }
101
102 result = rc->cmd(rc, cmd, cmd_size);
103 uwb_dev_unlock(&rc->uwb_dev);
104 if (result < 0)
105 uwb_rc_neh_rm(rc, neh);
106 else
107 uwb_rc_neh_arm(rc, neh);
108 uwb_rc_neh_put(neh);
109out:
110 if (needtofree == 1)
111 kfree(cmd);
112 return result < 0 ? result : 0;
113}
114EXPORT_SYMBOL_GPL(uwb_rc_cmd_async);
115
/* Completion bookkeeping shared between __uwb_rc_cmd() (the sleeper)
 * and uwb_rc_cmd_done() (the neh callback). */
struct uwb_rc_cmd_done_params {
	struct completion completion;	/* signalled when the reply arrives */
	struct uwb_rceb *reply;		/* in: optional buffer; out: reply data */
	ssize_t reply_size;		/* in: buffer size; out: bytes or -errno */
};
121
/*
 * neh callback for synchronous commands: capture the confirmation
 * event and wake the waiter in __uwb_rc_cmd().
 *
 * If the caller supplied a reply buffer it is used (the copy is
 * truncated to the buffer size); otherwise one is allocated with
 * GFP_ATOMIC, since this may run in atomic context.  On allocation
 * failure a negative errno is left in p->reply_size.
 */
static void uwb_rc_cmd_done(struct uwb_rc *rc, void *arg,
			    struct uwb_rceb *reply, ssize_t reply_size)
{
	struct uwb_rc_cmd_done_params *p = (struct uwb_rc_cmd_done_params *)arg;

	if (reply_size > 0) {
		if (p->reply)
			reply_size = min(p->reply_size, reply_size);
		else
			p->reply = kmalloc(reply_size, GFP_ATOMIC);

		if (p->reply)
			memcpy(p->reply, reply, reply_size);
		else
			reply_size = -ENOMEM;
	}
	p->reply_size = reply_size;
	complete(&p->completion);
}
141
142
/**
 * Generic function for issuing commands to the Radio Control Interface
 *
 * @rc:       UWB Radio Control descriptor
 * @cmd_name: Name of the command being issued (for error messages)
 * @cmd:      Pointer to rccb structure containing the command;
 *            normally you embed this structure as the first member of
 *            the full command structure.
 * @cmd_size: Size of the whole command buffer pointed to by @cmd.
 * @reply:    Pointer to where to store the reply
 * @reply_size: @reply's size
 * @expected_type: Expected type in the return event
 * @expected_event: Expected event code in the return event
 * @preply:   Here a pointer to where the event data is received will
 *            be stored. Once done with the data, free with kfree().
 *
 * This function is generic; it works for commands that return a fixed
 * and known size or for commands that return a variable amount of data.
 *
 * If a buffer is provided, that is used, although it could be chopped
 * to the maximum size of the buffer. If the buffer is NULL, then one
 * will be allocated in *preply with the whole contents of the reply.
 *
 * @rc needs to be referenced
 */
static
ssize_t __uwb_rc_cmd(struct uwb_rc *rc, const char *cmd_name,
		     struct uwb_rccb *cmd, size_t cmd_size,
		     struct uwb_rceb *reply, size_t reply_size,
		     u8 expected_type, u16 expected_event,
		     struct uwb_rceb **preply)
{
	ssize_t result = 0;
	struct device *dev = &rc->uwb_dev.dev;
	struct uwb_rc_cmd_done_params params;

	init_completion(&params.completion);
	params.reply = reply;
	params.reply_size = reply_size;

	result = uwb_rc_cmd_async(rc, cmd_name, cmd, cmd_size,
				  expected_type, expected_event,
				  uwb_rc_cmd_done, &params);
	if (result)
		return result;

	/* uwb_rc_cmd_done() fills in params and signals the completion. */
	wait_for_completion(&params.completion);

	if (preply)
		*preply = params.reply;

	if (params.reply_size < 0)
		dev_err(dev, "%s: confirmation event 0x%02x/%04x/%02x "
			"reception failed: %d\n", cmd_name,
			expected_type, expected_event, cmd->bCommandContext,
			(int)params.reply_size);
	return params.reply_size;
}
201
202
203/**
204 * Generic function for issuing commands to the Radio Control Interface
205 *
206 * @rc: UWB Radio Control descriptor
207 * @cmd_name: Name of the command being issued (for error messages)
208 * @cmd: Pointer to rccb structure containing the command;
209 * normally you embed this structure as the first member of
210 * the full command structure.
211 * @cmd_size: Size of the whole command buffer pointed to by @cmd.
212 * @reply: Pointer to the beginning of the confirmation event
213 * buffer. Normally bigger than an 'struct hwarc_rceb'.
214 * You need to fill out reply->bEventType and reply->wEvent (in
215 * cpu order) as the function will use them to verify the
216 * confirmation event.
217 * @reply_size: Size of the reply buffer
218 *
219 * The function checks that the length returned in the reply is at
220 * least as big as @reply_size; if not, it will be deemed an error and
221 * -EIO returned.
222 *
223 * @rc needs to be referenced
224 */
225ssize_t uwb_rc_cmd(struct uwb_rc *rc, const char *cmd_name,
226 struct uwb_rccb *cmd, size_t cmd_size,
227 struct uwb_rceb *reply, size_t reply_size)
228{
229 struct device *dev = &rc->uwb_dev.dev;
230 ssize_t result;
231
232 result = __uwb_rc_cmd(rc, cmd_name,
233 cmd, cmd_size, reply, reply_size,
234 reply->bEventType, reply->wEvent, NULL);
235
236 if (result > 0 && result < reply_size) {
237 dev_err(dev, "%s: not enough data returned for decoding reply "
238 "(%zu bytes received vs at least %zu needed)\n",
239 cmd_name, result, reply_size);
240 result = -EIO;
241 }
242 return result;
243}
244EXPORT_SYMBOL_GPL(uwb_rc_cmd);
245
246
247/**
248 * Generic function for issuing commands to the Radio Control
249 * Interface that return an unknown amount of data
250 *
251 * @rc: UWB Radio Control descriptor
252 * @cmd_name: Name of the command being issued (for error messages)
253 * @cmd: Pointer to rccb structure containing the command;
254 * normally you embed this structure as the first member of
255 * the full command structure.
256 * @cmd_size: Size of the whole command buffer pointed to by @cmd.
257 * @expected_type: Expected type in the return event
258 * @expected_event: Expected event code in the return event
259 * @preply: Here a pointer to where the event data is received will
260 * be stored. Once done with the data, free with kfree().
261 *
262 * The function checks that the length returned in the reply is at
263 * least as big as a 'struct uwb_rceb *'; if not, it will be deemed an
264 * error and -EIO returned.
265 *
266 * @rc needs to be referenced
267 */
268ssize_t uwb_rc_vcmd(struct uwb_rc *rc, const char *cmd_name,
269 struct uwb_rccb *cmd, size_t cmd_size,
270 u8 expected_type, u16 expected_event,
271 struct uwb_rceb **preply)
272{
273 return __uwb_rc_cmd(rc, cmd_name, cmd, cmd_size, NULL, 0,
274 expected_type, expected_event, preply);
275}
276EXPORT_SYMBOL_GPL(uwb_rc_vcmd);
277
278
/**
 * Reset a UWB Host Controller (and all radio settings)
 *
 * @rc:      Host Controller descriptor
 * @returns: 0 if ok, < 0 errno code on error
 *
 * We put the command on kmalloc'ed memory as some arches cannot do
 * USB from the stack. The reply event is copied from an stage buffer,
 * so it can be in the stack. See WUSB1.0[8.6.2.4] for more details.
 */
int uwb_rc_reset(struct uwb_rc *rc)
{
	int result = -ENOMEM;	/* pre-set for the kzalloc failure path */
	struct uwb_rc_evt_confirm reply;
	struct uwb_rccb *cmd;
	size_t cmd_size = sizeof(*cmd);

	mutex_lock(&rc->uwb_dev.mutex);	/* serialize command execution */
	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (cmd == NULL)
		goto error_kzalloc;
	cmd->bCommandType = UWB_RC_CET_GENERAL;
	cmd->wCommand = cpu_to_le16(UWB_RC_CMD_RESET);
	/* uwb_rc_cmd() matches the confirmation event against these. */
	reply.rceb.bEventType = UWB_RC_CET_GENERAL;
	reply.rceb.wEvent = UWB_RC_CMD_RESET;
	result = uwb_rc_cmd(rc, "RESET", cmd, cmd_size,
			    &reply.rceb, sizeof(reply));
	if (result < 0)
		goto error_cmd;
	if (reply.bResultCode != UWB_RC_RES_SUCCESS) {
		dev_err(&rc->uwb_dev.dev,
			"RESET: command execution failed: %s (%d)\n",
			uwb_rc_strerror(reply.bResultCode), reply.bResultCode);
		result = -EIO;
	}
error_cmd:
	kfree(cmd);
error_kzalloc:
	mutex_unlock(&rc->uwb_dev.mutex);
	return result;
}
320
321int uwbd_msg_handle_reset(struct uwb_event *evt)
322{
323 struct uwb_rc *rc = evt->rc;
324 int ret;
325
326 /* Need to prevent the RC hardware module going away while in
327 the rc->reset() call. */
328 if (!try_module_get(rc->owner))
329 return 0;
330
331 dev_info(&rc->uwb_dev.dev, "resetting radio controller\n");
332 ret = rc->reset(rc);
333 if (ret)
334 dev_err(&rc->uwb_dev.dev, "failed to reset hardware: %d\n", ret);
335
336 module_put(rc->owner);
337 return ret;
338}
339
340/**
341 * uwb_rc_reset_all - request a reset of the radio controller and PALs
342 * @rc: the radio controller of the hardware device to be reset.
343 *
344 * The full hardware reset of the radio controller and all the PALs
345 * will be scheduled.
346 */
347void uwb_rc_reset_all(struct uwb_rc *rc)
348{
349 struct uwb_event *evt;
350
351 evt = kzalloc(sizeof(struct uwb_event), GFP_ATOMIC);
352 if (unlikely(evt == NULL))
353 return;
354
355 evt->rc = __uwb_rc_get(rc); /* will be put by uwbd's uwbd_event_handle() */
356 evt->ts_jiffies = jiffies;
357 evt->type = UWB_EVT_TYPE_MSG;
358 evt->message = UWB_EVT_MSG_RESET;
359
360 uwbd_event_queue(evt);
361}
362EXPORT_SYMBOL_GPL(uwb_rc_reset_all);
diff --git a/drivers/uwb/rsv.c b/drivers/uwb/rsv.c
new file mode 100644
index 000000000000..bae16204576d
--- /dev/null
+++ b/drivers/uwb/rsv.c
@@ -0,0 +1,680 @@
1/*
2 * UWB reservation management.
3 *
4 * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License version
8 * 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18#include <linux/version.h>
19#include <linux/kernel.h>
20#include <linux/uwb.h>
21
22#include "uwb-internal.h"
23
24static void uwb_rsv_timer(unsigned long arg);
25
/* Human-readable reservation state names, indexed by enum uwb_rsv_state. */
static const char *rsv_states[] = {
	[UWB_RSV_STATE_NONE]          = "none",
	[UWB_RSV_STATE_O_INITIATED]   = "initiated",
	[UWB_RSV_STATE_O_PENDING]     = "pending",
	[UWB_RSV_STATE_O_MODIFIED]    = "modified",
	[UWB_RSV_STATE_O_ESTABLISHED] = "established",
	[UWB_RSV_STATE_T_ACCEPTED]    = "accepted",
	[UWB_RSV_STATE_T_DENIED]      = "denied",
	[UWB_RSV_STATE_T_PENDING]     = "pending",
};
36
/* Human-readable DRP reservation type names, indexed by enum uwb_drp_type. */
static const char *rsv_types[] = {
	[UWB_DRP_TYPE_ALIEN_BP] = "alien-bp",
	[UWB_DRP_TYPE_HARD]     = "hard",
	[UWB_DRP_TYPE_SOFT]     = "soft",
	[UWB_DRP_TYPE_PRIVATE]  = "private",
	[UWB_DRP_TYPE_PCA]      = "pca",
};
44
45/**
46 * uwb_rsv_state_str - return a string for a reservation state
47 * @state: the reservation state.
48 */
49const char *uwb_rsv_state_str(enum uwb_rsv_state state)
50{
51 if (state < UWB_RSV_STATE_NONE || state >= UWB_RSV_STATE_LAST)
52 return "unknown";
53 return rsv_states[state];
54}
55EXPORT_SYMBOL_GPL(uwb_rsv_state_str);
56
57/**
58 * uwb_rsv_type_str - return a string for a reservation type
59 * @type: the reservation type
60 */
61const char *uwb_rsv_type_str(enum uwb_drp_type type)
62{
63 if (type < UWB_DRP_TYPE_ALIEN_BP || type > UWB_DRP_TYPE_PCA)
64 return "invalid";
65 return rsv_types[type];
66}
67EXPORT_SYMBOL_GPL(uwb_rsv_type_str);
68
/* Emit a one-line debug summary of a reservation: owner -> target: state. */
static void uwb_rsv_dump(struct uwb_rsv *rsv)
{
	struct device *dev = &rsv->rc->uwb_dev.dev;
	struct uwb_dev_addr devaddr;
	char owner[UWB_ADDR_STRSIZE], target[UWB_ADDR_STRSIZE];

	uwb_dev_addr_print(owner, sizeof(owner), &rsv->owner->dev_addr);
	/* The target is either a specific device or a bare DevAddr;
	 * copy whichever applies into a local for printing. */
	if (rsv->target.type == UWB_RSV_TARGET_DEV)
		devaddr = rsv->target.dev->dev_addr;
	else
		devaddr = rsv->target.devaddr;
	uwb_dev_addr_print(target, sizeof(target), &devaddr);

	dev_dbg(dev, "rsv %s -> %s: %s\n", owner, target, uwb_rsv_state_str(rsv->state));
}
84
85/*
86 * Get a free stream index for a reservation.
87 *
88 * If the target is a DevAddr (e.g., a WUSB cluster reservation) then
89 * the stream is allocated from a pool of per-RC stream indexes,
90 * otherwise a unique stream index for the target is selected.
91 */
92static int uwb_rsv_get_stream(struct uwb_rsv *rsv)
93{
94 struct uwb_rc *rc = rsv->rc;
95 unsigned long *streams_bm;
96 int stream;
97
98 switch (rsv->target.type) {
99 case UWB_RSV_TARGET_DEV:
100 streams_bm = rsv->target.dev->streams;
101 break;
102 case UWB_RSV_TARGET_DEVADDR:
103 streams_bm = rc->uwb_dev.streams;
104 break;
105 default:
106 return -EINVAL;
107 }
108
109 stream = find_first_zero_bit(streams_bm, UWB_NUM_STREAMS);
110 if (stream >= UWB_NUM_STREAMS)
111 return -EBUSY;
112
113 rsv->stream = stream;
114 set_bit(stream, streams_bm);
115
116 return 0;
117}
118
119static void uwb_rsv_put_stream(struct uwb_rsv *rsv)
120{
121 struct uwb_rc *rc = rsv->rc;
122 unsigned long *streams_bm;
123
124 switch (rsv->target.type) {
125 case UWB_RSV_TARGET_DEV:
126 streams_bm = rsv->target.dev->streams;
127 break;
128 case UWB_RSV_TARGET_DEVADDR:
129 streams_bm = rc->uwb_dev.streams;
130 break;
131 default:
132 return;
133 }
134
135 clear_bit(rsv->stream, streams_bm);
136}
137
/*
 * Generate a MAS allocation with a single row component.
 *
 * @first_mas:    row (MAS offset within a zone) where the column starts
 * @mas_per_zone: number of consecutive MAS taken in each zone
 * @zs, @ze:      first and last zone (inclusive)
 *
 * Builds a column of @mas_per_zone bits, shifts it to @first_mas of
 * zone @zs, then ORs it into @mas once per zone, advancing one zone
 * per iteration.
 *
 * NOTE(review): @ze is treated inclusively (z <= ze) and the caller
 * in uwb_rsv_alloc_mas() passes UWB_NUM_ZONES, so the loop body runs
 * one extra time with the column shifted past the final zone; the
 * bits shift off the end of the bitmap so nothing is corrupted, but
 * confirm the extra iteration is intended.
 */
static void uwb_rsv_gen_alloc_row(struct uwb_mas_bm *mas,
				  int first_mas, int mas_per_zone,
				  int zs, int ze)
{
	struct uwb_mas_bm col;
	int z;

	bitmap_zero(mas->bm, UWB_NUM_MAS);
	bitmap_zero(col.bm, UWB_NUM_MAS);
	bitmap_fill(col.bm, mas_per_zone);
	bitmap_shift_left(col.bm, col.bm, first_mas + zs * UWB_MAS_PER_ZONE, UWB_NUM_MAS);

	for (z = zs; z <= ze; z++) {
		bitmap_or(mas->bm, mas->bm, col.bm, UWB_NUM_MAS);
		bitmap_shift_left(col.bm, col.bm, UWB_MAS_PER_ZONE, UWB_NUM_MAS);
	}
}
158
/*
 * Allocate some MAS for this reservation based on current local
 * availability, the reservation parameters (max_mas, min_mas,
 * sparsity), and the WiMedia rules for MAS allocations.
 *
 * Returns -EBUSY if insufficient free MAS are available.
 *
 * FIXME: to simplify this, only safe reservations with a single row
 * component in zones 1 to 15 are tried (zone 0 is skipped to avoid
 * problems with the MAS reserved for the BP).
 *
 * [ECMA-368] section B.2.
 */
static int uwb_rsv_alloc_mas(struct uwb_rsv *rsv)
{
	/* Maximum number of "safe" MAS available per row; indexed by row. */
	static const int safe_mas_in_row[UWB_NUM_ZONES] = {
		8, 7, 6, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 2, 1,
	};
	int n, r;
	struct uwb_mas_bm mas;
	bool found = false;

	/*
	 * Search all valid safe allocations until either: too few MAS
	 * are available; or the smallest allocation with sufficient
	 * MAS is found.
	 *
	 * The top of the zones are preferred, so space for larger
	 * allocations is available in the bottom of the zone (e.g., a
	 * 15 MAS allocation should start in row 14 leaving space for
	 * a 120 MAS allocation at row 0).
	 *
	 * NOTE(review): r ranges over rows (0..UWB_MAS_PER_ZONE-1) but
	 * indexes safe_mas_in_row[], which is sized UWB_NUM_ZONES;
	 * this is only in bounds if the two constants are equal --
	 * confirm against the uwb header definitions.
	 */
	for (n = safe_mas_in_row[0]; n >= 1; n--) {
		int num_mas;

		num_mas = n * (UWB_NUM_ZONES - 1);
		if (num_mas < rsv->min_mas)
			break;
		if (found && num_mas < rsv->max_mas)
			break;

		for (r = UWB_MAS_PER_ZONE-1; r >= 0; r--) {
			if (safe_mas_in_row[r] < n)
				continue;
			uwb_rsv_gen_alloc_row(&mas, r, n, 1, UWB_NUM_ZONES);
			if (uwb_drp_avail_reserve_pending(rsv->rc, &mas) == 0) {
				found = true;
				break;
			}
		}
	}

	if (!found)
		return -EBUSY;

	bitmap_copy(rsv->mas.bm, mas.bm, UWB_NUM_MAS);
	return 0;
}
217
/*
 * (Re)arm or stop a reservation's watchdog timer according to its
 * current state.  The timeout is measured in superframes; by default
 * UWB_MAX_LOST_BEACONS, i.e. the reservation expires when the peer
 * has been silent for as long as a device may miss beacons.
 */
static void uwb_rsv_stroke_timer(struct uwb_rsv *rsv)
{
	int sframes = UWB_MAX_LOST_BEACONS;

	/*
	 * Multicast reservations can become established within 1
	 * super frame and should not be terminated if no response is
	 * received.
	 */
	if (rsv->is_multicast) {
		if (rsv->state == UWB_RSV_STATE_O_INITIATED)
			sframes = 1;
		if (rsv->state == UWB_RSV_STATE_O_ESTABLISHED)
			sframes = 0;	/* established multicast: no watchdog */
	}

	rsv->expired = false;
	if (sframes > 0) {
		/*
		 * Add an additional 2 superframes to account for the
		 * time to send the SET DRP IE command.
		 */
		unsigned timeout_us = (sframes + 2) * UWB_SUPERFRAME_LENGTH_US;
		mod_timer(&rsv->timer, jiffies + usecs_to_jiffies(timeout_us));
	} else
		del_timer(&rsv->timer);
}
245
246/*
247 * Update a reservations state, and schedule an update of the
248 * transmitted DRP IEs.
249 */
250static void uwb_rsv_state_update(struct uwb_rsv *rsv,
251 enum uwb_rsv_state new_state)
252{
253 rsv->state = new_state;
254 rsv->ie_valid = false;
255
256 uwb_rsv_dump(rsv);
257
258 uwb_rsv_stroke_timer(rsv);
259 uwb_rsv_sched_update(rsv->rc);
260}
261
262static void uwb_rsv_callback(struct uwb_rsv *rsv)
263{
264 if (rsv->callback)
265 rsv->callback(rsv);
266}
267
/*
 * Apply a state transition to a reservation, performing the side
 * effects (MAS bookkeeping, stream release, PAL callbacks) the new
 * state requires.  A transition to the current state merely strokes
 * the watchdog for stable states.
 */
void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state)
{
	if (rsv->state == new_state) {
		switch (rsv->state) {
		case UWB_RSV_STATE_O_ESTABLISHED:
		case UWB_RSV_STATE_T_ACCEPTED:
		case UWB_RSV_STATE_NONE:
			/* Stable states: just re-arm the watchdog. */
			uwb_rsv_stroke_timer(rsv);
			break;
		default:
			/* Expecting a state transition so leave timer
			   as-is. */
			break;
		}
		return;
	}

	switch (new_state) {
	case UWB_RSV_STATE_NONE:
		/* Terminated: release MAS and stream, notify the PAL. */
		uwb_drp_avail_release(rsv->rc, &rsv->mas);
		uwb_rsv_put_stream(rsv);
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_NONE);
		uwb_rsv_callback(rsv);
		break;
	case UWB_RSV_STATE_O_INITIATED:
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_INITIATED);
		break;
	case UWB_RSV_STATE_O_PENDING:
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_PENDING);
		break;
	case UWB_RSV_STATE_O_ESTABLISHED:
		/* Our pending reservation was confirmed: commit the MAS. */
		uwb_drp_avail_reserve(rsv->rc, &rsv->mas);
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_ESTABLISHED);
		uwb_rsv_callback(rsv);
		break;
	case UWB_RSV_STATE_T_ACCEPTED:
		/* We accepted a peer's reservation: commit the MAS. */
		uwb_drp_avail_reserve(rsv->rc, &rsv->mas);
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_ACCEPTED);
		uwb_rsv_callback(rsv);
		break;
	case UWB_RSV_STATE_T_DENIED:
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_DENIED);
		break;
	default:
		dev_err(&rsv->rc->uwb_dev.dev, "unhandled state: %s (%d)\n",
			uwb_rsv_state_str(new_state), new_state);
	}
}
316
317static struct uwb_rsv *uwb_rsv_alloc(struct uwb_rc *rc)
318{
319 struct uwb_rsv *rsv;
320
321 rsv = kzalloc(sizeof(struct uwb_rsv), GFP_KERNEL);
322 if (!rsv)
323 return NULL;
324
325 INIT_LIST_HEAD(&rsv->rc_node);
326 INIT_LIST_HEAD(&rsv->pal_node);
327 init_timer(&rsv->timer);
328 rsv->timer.function = uwb_rsv_timer;
329 rsv->timer.data = (unsigned long)rsv;
330
331 rsv->rc = rc;
332
333 return rsv;
334}
335
336static void uwb_rsv_free(struct uwb_rsv *rsv)
337{
338 uwb_dev_put(rsv->owner);
339 if (rsv->target.type == UWB_RSV_TARGET_DEV)
340 uwb_dev_put(rsv->target.dev);
341 kfree(rsv);
342}
343
344/**
345 * uwb_rsv_create - allocate and initialize a UWB reservation structure
346 * @rc: the radio controller
347 * @cb: callback to use when the reservation completes or terminates
348 * @pal_priv: data private to the PAL to be passed in the callback
349 *
350 * The callback is called when the state of the reservation changes from:
351 *
352 * - pending to accepted
353 * - pending to denined
354 * - accepted to terminated
355 * - pending to terminated
356 */
357struct uwb_rsv *uwb_rsv_create(struct uwb_rc *rc, uwb_rsv_cb_f cb, void *pal_priv)
358{
359 struct uwb_rsv *rsv;
360
361 rsv = uwb_rsv_alloc(rc);
362 if (!rsv)
363 return NULL;
364
365 rsv->callback = cb;
366 rsv->pal_priv = pal_priv;
367
368 return rsv;
369}
370EXPORT_SYMBOL_GPL(uwb_rsv_create);
371
/*
 * Tear down a reservation: terminate it if still active, stop the
 * watchdog, unlink it from the controller's list and free it.
 * Callers hold rc->rsvs_mutex (see uwb_rsv_destroy()/uwb_rsv_cleanup()).
 */
void uwb_rsv_remove(struct uwb_rsv *rsv)
{
	if (rsv->state != UWB_RSV_STATE_NONE)
		uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);
	del_timer_sync(&rsv->timer);	/* no timer may run past this point */
	list_del(&rsv->rc_node);
	uwb_rsv_free(rsv);
}
380
/**
 * uwb_rsv_destroy - free a UWB reservation structure
 * @rsv: the reservation to free
 *
 * The reservation will be terminated if it is pending or established.
 */
void uwb_rsv_destroy(struct uwb_rsv *rsv)
{
	/* Cache rc: uwb_rsv_remove() frees @rsv. */
	struct uwb_rc *rc = rsv->rc;

	mutex_lock(&rc->rsvs_mutex);
	uwb_rsv_remove(rsv);
	mutex_unlock(&rc->rsvs_mutex);
}
EXPORT_SYMBOL_GPL(uwb_rsv_destroy);
396
/**
 * uwb_rsv_establish - start a reservation establishment
 * @rsv: the reservation
 *
 * The PAL should fill in @rsv's owner, target, type, max_mas,
 * min_mas, sparsity and is_multicast fields. If the target is a
 * uwb_dev it must be referenced.
 *
 * The reservation's callback will be called when the reservation is
 * accepted, denied or times out.
 */
int uwb_rsv_establish(struct uwb_rsv *rsv)
{
	struct uwb_rc *rc = rsv->rc;
	int ret;

	mutex_lock(&rc->rsvs_mutex);

	/* Pick a stream index first, then MAS to carry the traffic. */
	ret = uwb_rsv_get_stream(rsv);
	if (ret)
		goto out;

	ret = uwb_rsv_alloc_mas(rsv);
	if (ret) {
		uwb_rsv_put_stream(rsv);	/* undo the stream allocation */
		goto out;
	}

	list_add_tail(&rsv->rc_node, &rc->reservations);
	rsv->owner = &rc->uwb_dev;	/* we initiate, so we are the owner */
	uwb_dev_get(rsv->owner);
	uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_INITIATED);
out:
	mutex_unlock(&rc->rsvs_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(uwb_rsv_establish);
434
435/**
436 * uwb_rsv_modify - modify an already established reservation
437 * @rsv: the reservation to modify
438 * @max_mas: new maximum MAS to reserve
439 * @min_mas: new minimum MAS to reserve
440 * @sparsity: new sparsity to use
441 *
442 * FIXME: implement this once there are PALs that use it.
443 */
444int uwb_rsv_modify(struct uwb_rsv *rsv, int max_mas, int min_mas, int sparsity)
445{
446 return -ENOSYS;
447}
448EXPORT_SYMBOL_GPL(uwb_rsv_modify);
449
/**
 * uwb_rsv_terminate - terminate an established reservation
 * @rsv: the reservation to terminate
 *
 * A reservation is terminated by removing the DRP IE from the beacon,
 * the other end will consider the reservation to be terminated when
 * it does not see the DRP IE for at least mMaxLostBeacons.
 *
 * If applicable, the reference to the target uwb_dev will be released.
 */
void uwb_rsv_terminate(struct uwb_rsv *rsv)
{
	struct uwb_rc *rc = rsv->rc;

	mutex_lock(&rc->rsvs_mutex);

	/* The NONE transition releases MAS and stream and notifies the PAL. */
	uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);

	mutex_unlock(&rc->rsvs_mutex);
}
EXPORT_SYMBOL_GPL(uwb_rsv_terminate);
471
/**
 * uwb_rsv_accept - accept a new reservation from a peer
 * @rsv: the reservation
 * @cb: call back for reservation changes
 * @pal_priv: data to be passed in the above call back
 *
 * Reservation requests from peers are denied unless a PAL accepts it
 * by calling this function.
 */
void uwb_rsv_accept(struct uwb_rsv *rsv, uwb_rsv_cb_f cb, void *pal_priv)
{
	rsv->callback = cb;
	rsv->pal_priv = pal_priv;
	/* State is assigned directly, not via uwb_rsv_set_state():
	 * presumably because this runs from a PAL's new_rsv hook while
	 * uwb_rsv_new_target() is mid-setup and applies the transition
	 * itself afterwards -- confirm against uwb_rsv_new_target(). */
	rsv->state = UWB_RSV_STATE_T_ACCEPTED;
}
EXPORT_SYMBOL_GPL(uwb_rsv_accept);
488
489/*
490 * Is a received DRP IE for this reservation?
491 */
492static bool uwb_rsv_match(struct uwb_rsv *rsv, struct uwb_dev *src,
493 struct uwb_ie_drp *drp_ie)
494{
495 struct uwb_dev_addr *rsv_src;
496 int stream;
497
498 stream = uwb_ie_drp_stream_index(drp_ie);
499
500 if (rsv->stream != stream)
501 return false;
502
503 switch (rsv->target.type) {
504 case UWB_RSV_TARGET_DEVADDR:
505 return rsv->stream == stream;
506 case UWB_RSV_TARGET_DEV:
507 if (uwb_ie_drp_owner(drp_ie))
508 rsv_src = &rsv->owner->dev_addr;
509 else
510 rsv_src = &rsv->target.dev->dev_addr;
511 return uwb_dev_addr_cmp(&src->dev_addr, rsv_src) == 0;
512 }
513 return false;
514}
515
/*
 * Create a reservation for a peer-initiated request in which we are
 * the target: seed it from the received DRP IE and give registered
 * PALs a chance to accept it; if none does, it is denied.
 */
static struct uwb_rsv *uwb_rsv_new_target(struct uwb_rc *rc,
					  struct uwb_dev *src,
					  struct uwb_ie_drp *drp_ie)
{
	struct uwb_rsv *rsv;
	struct uwb_pal *pal;
	enum uwb_rsv_state state;

	rsv = uwb_rsv_alloc(rc);
	if (!rsv)
		return NULL;

	rsv->rc = rc;
	rsv->owner = src;	/* the peer owns this reservation */
	uwb_dev_get(rsv->owner);
	rsv->target.type = UWB_RSV_TARGET_DEV;
	rsv->target.dev = &rc->uwb_dev;
	rsv->type = uwb_ie_drp_type(drp_ie);
	rsv->stream = uwb_ie_drp_stream_index(drp_ie);
	set_bit(rsv->stream, rsv->owner->streams);	/* mark owner's stream in use */
	uwb_drp_ie_to_bm(&rsv->mas, drp_ie);

	/*
	 * See if any PALs are interested in this reservation. If not,
	 * deny the request.
	 */
	rsv->state = UWB_RSV_STATE_T_DENIED;
	spin_lock(&rc->pal_lock);
	list_for_each_entry(pal, &rc->pals, node) {
		if (pal->new_rsv)
			pal->new_rsv(rsv);	/* may call uwb_rsv_accept() */
		if (rsv->state == UWB_RSV_STATE_T_ACCEPTED)
			break;
	}
	spin_unlock(&rc->pal_lock);

	list_add_tail(&rsv->rc_node, &rc->reservations);
	state = rsv->state;
	/* Reset so uwb_rsv_set_state() sees a real transition. */
	rsv->state = UWB_RSV_STATE_NONE;
	uwb_rsv_set_state(rsv, state);

	return rsv;
}
559
560/**
561 * uwb_rsv_find - find a reservation for a received DRP IE.
562 * @rc: the radio controller
563 * @src: source of the DRP IE
564 * @drp_ie: the DRP IE
565 *
566 * If the reservation cannot be found and the DRP IE is from a peer
567 * attempting to establish a new reservation, create a new reservation
568 * and add it to the list.
569 */
570struct uwb_rsv *uwb_rsv_find(struct uwb_rc *rc, struct uwb_dev *src,
571 struct uwb_ie_drp *drp_ie)
572{
573 struct uwb_rsv *rsv;
574
575 list_for_each_entry(rsv, &rc->reservations, rc_node) {
576 if (uwb_rsv_match(rsv, src, drp_ie))
577 return rsv;
578 }
579
580 if (uwb_ie_drp_owner(drp_ie))
581 return uwb_rsv_new_target(rc, src, drp_ie);
582
583 return NULL;
584}
585
586/*
587 * Go through all the reservations and check for timeouts and (if
588 * necessary) update their DRP IEs.
589 *
590 * FIXME: look at building the SET_DRP_IE command here rather than
591 * having to rescan the list in uwb_rc_send_all_drp_ie().
592 */
593static bool uwb_rsv_update_all(struct uwb_rc *rc)
594{
595 struct uwb_rsv *rsv, *t;
596 bool ie_updated = false;
597
598 list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) {
599 if (rsv->expired)
600 uwb_drp_handle_timeout(rsv);
601 if (!rsv->ie_valid) {
602 uwb_drp_ie_update(rsv);
603 ie_updated = true;
604 }
605 }
606
607 return ie_updated;
608}
609
610void uwb_rsv_sched_update(struct uwb_rc *rc)
611{
612 queue_work(rc->rsv_workq, &rc->rsv_update_work);
613}
614
/*
 * Update DRP IEs and, if necessary, the DRP Availability IE and send
 * the updated IEs to the radio controller.
 */
static void uwb_rsv_update_work(struct work_struct *work)
{
	struct uwb_rc *rc = container_of(work, struct uwb_rc, rsv_update_work);
	bool ie_updated;

	mutex_lock(&rc->rsvs_mutex);

	ie_updated = uwb_rsv_update_all(rc);

	if (!rc->drp_avail.ie_valid) {
		uwb_drp_avail_ie_update(rc);
		ie_updated = true;
	}

	/* Only talk to the hardware if something actually changed. */
	if (ie_updated)
		uwb_rc_send_all_drp_ie(rc);

	mutex_unlock(&rc->rsvs_mutex);
}
638
639static void uwb_rsv_timer(unsigned long arg)
640{
641 struct uwb_rsv *rsv = (struct uwb_rsv *)arg;
642
643 rsv->expired = true;
644 uwb_rsv_sched_update(rsv->rc);
645}
646
/* Initialize the reservation-related state of a radio controller. */
void uwb_rsv_init(struct uwb_rc *rc)
{
	INIT_LIST_HEAD(&rc->reservations);
	mutex_init(&rc->rsvs_mutex);
	INIT_WORK(&rc->rsv_update_work, uwb_rsv_update_work);

	/* NOTE(review): complementing the local device's stream bitmap
	 * in place sets every bit if it started out zeroed, which would
	 * make uwb_rsv_get_stream() report -EBUSY for DevAddr targets --
	 * confirm the intended initial bitmap contents. */
	bitmap_complement(rc->uwb_dev.streams, rc->uwb_dev.streams, UWB_NUM_STREAMS);
}
655
656int uwb_rsv_setup(struct uwb_rc *rc)
657{
658 char name[16];
659
660 snprintf(name, sizeof(name), "%s_rsvd", dev_name(&rc->uwb_dev.dev));
661 rc->rsv_workq = create_singlethread_workqueue(name);
662 if (rc->rsv_workq == NULL)
663 return -ENOMEM;
664
665 return 0;
666}
667
/* Tear down all reservations and the update workqueue at rc shutdown. */
void uwb_rsv_cleanup(struct uwb_rc *rc)
{
	struct uwb_rsv *rsv, *t;

	mutex_lock(&rc->rsvs_mutex);
	list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) {
		uwb_rsv_remove(rsv);
	}
	mutex_unlock(&rc->rsvs_mutex);

	/* Flush any deferred update before destroying its workqueue. */
	cancel_work_sync(&rc->rsv_update_work);
	destroy_workqueue(rc->rsv_workq);
}
diff --git a/drivers/uwb/scan.c b/drivers/uwb/scan.c
new file mode 100644
index 000000000000..2d270748f32b
--- /dev/null
+++ b/drivers/uwb/scan.c
@@ -0,0 +1,133 @@
1/*
2 * Ultra Wide Band
3 * Scanning management
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 *
24 * FIXME: docs
25 * FIXME: there are issues here on how BEACON and SCAN on USB RCI deal
26 * with each other. Currently seems that START_BEACON while
27 * SCAN_ONLY will cancel the scan, so we need to update the
28 * state here. Clarification request sent by email on
29 * 10/05/2005.
30 * 10/28/2005 No clear answer heard--maybe we'll hack the API
31 * so that when we start beaconing, if the HC is
32 * scanning in a mode not compatible with beaconing
33 * we just fail.
34 */
35
36#include <linux/device.h>
37#include <linux/err.h>
38#include "uwb-internal.h"
39
40
/**
 * Start/stop scanning in a radio controller
 *
 * @rc: UWB Radio Controller
 * @channel: Channel to scan; encodings in WUSB1.0[Table 5.12]
 * @type: Type of scanning to do.
 * @bpst_offset: value at which to start scanning (if type ==
 *     UWB_SCAN_ONLY_STARTTIME)
 * @returns: 0 if ok, < 0 errno code on error
 *
 * We put the command on kmalloc'ed memory as some arches cannot do
 * USB from the stack. The reply event is copied from an stage buffer,
 * so it can be in the stack. See WUSB1.0[8.6.2.4] for more details.
 *
 * On success the controller's scanning state (rc->scanning /
 * rc->scan_type) is updated under the device mutex.
 */
int uwb_rc_scan(struct uwb_rc *rc,
		unsigned channel, enum uwb_scan_type type,
		unsigned bpst_offset)
{
	int result;
	struct uwb_rc_cmd_scan *cmd;
	struct uwb_rc_evt_confirm reply;

	result = -ENOMEM;
	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (cmd == NULL)
		goto error_kzalloc;
	mutex_lock(&rc->uwb_dev.mutex);
	/* Build the SCAN command and describe the confirm event we expect. */
	cmd->rccb.bCommandType = UWB_RC_CET_GENERAL;
	cmd->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_SCAN);
	cmd->bChannelNumber = channel;
	cmd->bScanState = type;
	cmd->wStartTime = cpu_to_le16(bpst_offset);
	reply.rceb.bEventType = UWB_RC_CET_GENERAL;
	reply.rceb.wEvent = UWB_RC_CMD_SCAN;
	result = uwb_rc_cmd(rc, "SCAN", &cmd->rccb, sizeof(*cmd),
			    &reply.rceb, sizeof(reply));
	if (result < 0)
		goto error_cmd;
	if (reply.bResultCode != UWB_RC_RES_SUCCESS) {
		/* Command delivered but the radio controller rejected it. */
		dev_err(&rc->uwb_dev.dev,
			"SCAN: command execution failed: %s (%d)\n",
			uwb_rc_strerror(reply.bResultCode), reply.bResultCode);
		result = -EIO;
		goto error_cmd;
	}
	rc->scanning = channel;
	rc->scan_type = type;
error_cmd:
	mutex_unlock(&rc->uwb_dev.mutex);
	kfree(cmd);
error_kzalloc:
	return result;
}
94
95/*
96 * Print scanning state
97 */
98static ssize_t uwb_rc_scan_show(struct device *dev,
99 struct device_attribute *attr, char *buf)
100{
101 struct uwb_dev *uwb_dev = to_uwb_dev(dev);
102 struct uwb_rc *rc = uwb_dev->rc;
103 ssize_t result;
104
105 mutex_lock(&rc->uwb_dev.mutex);
106 result = sprintf(buf, "%d %d\n", rc->scanning, rc->scan_type);
107 mutex_unlock(&rc->uwb_dev.mutex);
108 return result;
109}
110
111/*
112 *
113 */
114static ssize_t uwb_rc_scan_store(struct device *dev,
115 struct device_attribute *attr,
116 const char *buf, size_t size)
117{
118 struct uwb_dev *uwb_dev = to_uwb_dev(dev);
119 struct uwb_rc *rc = uwb_dev->rc;
120 unsigned channel;
121 unsigned type;
122 unsigned bpst_offset = 0;
123 ssize_t result = -EINVAL;
124
125 result = sscanf(buf, "%u %u %u\n", &channel, &type, &bpst_offset);
126 if (result >= 2 && type < UWB_SCAN_TOP)
127 result = uwb_rc_scan(rc, channel, type, bpst_offset);
128
129 return result < 0 ? result : size;
130}
131
/*
 * Radio Control "scan" sysfs attribute: world-readable, owner-writable
 * (S_IRUGO | S_IWUSR).  The extern declaration lives in uwb-internal.h.
 */
DEVICE_ATTR(scan, S_IRUGO | S_IWUSR, uwb_rc_scan_show, uwb_rc_scan_store);
diff --git a/drivers/uwb/umc-bus.c b/drivers/uwb/umc-bus.c
new file mode 100644
index 000000000000..2d8d62d9f53e
--- /dev/null
+++ b/drivers/uwb/umc-bus.c
@@ -0,0 +1,218 @@
1/*
2 * Bus for UWB Multi-interface Controller capabilities.
3 *
4 * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
5 *
6 * This file is released under the GNU GPL v2.
7 */
8#include <linux/kernel.h>
9#include <linux/sysfs.h>
10#include <linux/workqueue.h>
11#include <linux/uwb/umc.h>
12#include <linux/pci.h>
13
14static int umc_bus_unbind_helper(struct device *dev, void *data)
15{
16 struct device *parent = data;
17
18 if (dev->parent == parent && dev->driver)
19 device_release_driver(dev);
20 return 0;
21}
22
/**
 * umc_controller_reset - reset the whole UMC controller
 * @umc: the UMC device for the radio controller.
 *
 * Drivers will be unbound from all UMC devices belonging to the
 * controller and then the radio controller will be rebound.  The
 * radio controller is expected to do a full hardware reset when it is
 * probed.
 *
 * If this is called while a probe() or remove() is in progress it
 * will return -EAGAIN and not perform the reset.
 */
int umc_controller_reset(struct umc_dev *umc)
{
	struct device *parent = umc->dev.parent;
	int ret;

	/* The parent's semaphore is what the driver core holds during
	 * probe/remove; trylock so we bail instead of deadlocking. */
	if (down_trylock(&parent->sem))
		return -EAGAIN;
	bus_for_each_dev(&umc_bus_type, NULL, parent, umc_bus_unbind_helper);
	ret = device_attach(&umc->dev);
	if (ret == 1)	/* device_attach() returns 1 when a driver bound */
		ret = 0;
	up(&parent->sem);

	return ret;
}
50EXPORT_SYMBOL_GPL(umc_controller_reset);
51
/**
 * umc_match_pci_id - match a UMC driver to a UMC device's parent PCI device.
 * @umc_drv: umc driver with match_data pointing to a zero-terminated
 *	table of pci_device_id's.
 * @umc: umc device whose parent is to be matched.
 *
 * Returns non-zero on a match; 0 if the parent is not a PCI device or
 * no table entry matches.
 */
int umc_match_pci_id(struct umc_driver *umc_drv, struct umc_dev *umc)
{
	const struct pci_device_id *id_table = umc_drv->match_data;
	struct pci_dev *pci;

	if (umc->dev.parent->bus != &pci_bus_type)
		return 0;

	pci = to_pci_dev(umc->dev.parent);
	return pci_match_id(id_table, pci) != NULL;
}
70
71static int umc_bus_rescan_helper(struct device *dev, void *data)
72{
73 int ret = 0;
74
75 if (!dev->driver)
76 ret = device_attach(dev);
77
78 return ret < 0 ? ret : 0;
79}
80
/*
 * Attempt to bind drivers to all unbound UMC devices on the bus.
 * Failures are only logged; callers cannot act on them anyway.
 */
static void umc_bus_rescan(void)
{
	int err;

	/*
	 * We can't use bus_rescan_devices() here as it deadlocks when
	 * it tries to retake the dev->parent semaphore.
	 */
	err = bus_for_each_dev(&umc_bus_type, NULL, NULL, umc_bus_rescan_helper);
	if (err < 0)
		printk(KERN_WARNING "%s: rescan of bus failed: %d\n",
		       KBUILD_MODNAME, err);
}
94
95static int umc_bus_match(struct device *dev, struct device_driver *drv)
96{
97 struct umc_dev *umc = to_umc_dev(dev);
98 struct umc_driver *umc_driver = to_umc_driver(drv);
99
100 if (umc->cap_id == umc_driver->cap_id) {
101 if (umc_driver->match)
102 return umc_driver->match(umc_driver, umc);
103 else
104 return 1;
105 }
106 return 0;
107}
108
/*
 * Bus probe callback: hold a device reference across the driver's
 * probe(); drop it again on failure.  On success, rescan the bus so
 * devices that were waiting on this capability can bind.
 */
static int umc_device_probe(struct device *dev)
{
	struct umc_dev *umc;
	struct umc_driver *umc_driver;
	int err;

	umc_driver = to_umc_driver(dev->driver);
	umc = to_umc_dev(dev);

	get_device(dev);
	err = umc_driver->probe(umc);
	if (err)
		put_device(dev);
	else
		umc_bus_rescan();

	return err;
}
127
/*
 * Bus remove callback: invoke the driver's remove() and release the
 * device reference taken in umc_device_probe().
 */
static int umc_device_remove(struct device *dev)
{
	struct umc_dev *umc;
	struct umc_driver *umc_driver;

	umc_driver = to_umc_driver(dev->driver);
	umc = to_umc_dev(dev);

	umc_driver->remove(umc);
	put_device(dev);
	return 0;
}
140
141static int umc_device_suspend(struct device *dev, pm_message_t state)
142{
143 struct umc_dev *umc;
144 struct umc_driver *umc_driver;
145 int err = 0;
146
147 umc = to_umc_dev(dev);
148
149 if (dev->driver) {
150 umc_driver = to_umc_driver(dev->driver);
151 if (umc_driver->suspend)
152 err = umc_driver->suspend(umc, state);
153 }
154 return err;
155}
156
157static int umc_device_resume(struct device *dev)
158{
159 struct umc_dev *umc;
160 struct umc_driver *umc_driver;
161 int err = 0;
162
163 umc = to_umc_dev(dev);
164
165 if (dev->driver) {
166 umc_driver = to_umc_driver(dev->driver);
167 if (umc_driver->resume)
168 err = umc_driver->resume(umc);
169 }
170 return err;
171}
172
/* sysfs: show the UMC capability ID as "0xNN\n". */
static ssize_t capability_id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct umc_dev *umc = to_umc_dev(dev);

	return sprintf(buf, "0x%02x\n", umc->cap_id);
}
179
/* sysfs: show the UMC capability version as "0xNNNN\n". */
static ssize_t version_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct umc_dev *umc = to_umc_dev(dev);

	return sprintf(buf, "0x%04x\n", umc->version);
}
186
/* Default sysfs attributes created for every device on the UMC bus. */
static struct device_attribute umc_dev_attrs[] = {
	__ATTR_RO(capability_id),
	__ATTR_RO(version),
	__ATTR_NULL,
};
192
/* The UMC bus: one device per capability exposed by a multi-interface
 * controller; exported so capability drivers can register on it. */
struct bus_type umc_bus_type = {
	.name		= "umc",
	.match		= umc_bus_match,
	.probe		= umc_device_probe,
	.remove		= umc_device_remove,
	.suspend        = umc_device_suspend,
	.resume         = umc_device_resume,
	.dev_attrs	= umc_dev_attrs,
};
EXPORT_SYMBOL_GPL(umc_bus_type);
203
/* Module init: register the UMC bus with the driver core. */
static int __init umc_bus_init(void)
{
	return bus_register(&umc_bus_type);
}
module_init(umc_bus_init);
209
/* Module exit: unregister the UMC bus. */
static void __exit umc_bus_exit(void)
{
	bus_unregister(&umc_bus_type);
}
module_exit(umc_bus_exit);
215
216MODULE_DESCRIPTION("UWB Multi-interface Controller capability bus");
217MODULE_AUTHOR("Cambridge Silicon Radio Ltd.");
218MODULE_LICENSE("GPL");
diff --git a/drivers/uwb/umc-dev.c b/drivers/uwb/umc-dev.c
new file mode 100644
index 000000000000..aa44e1c1a102
--- /dev/null
+++ b/drivers/uwb/umc-dev.c
@@ -0,0 +1,104 @@
1/*
2 * UWB Multi-interface Controller device management.
3 *
4 * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
5 *
6 * This file is released under the GNU GPL v2.
7 */
8#include <linux/kernel.h>
9#include <linux/uwb/umc.h>
10#define D_LOCAL 0
11#include <linux/uwb/debug.h>
12
/* Device-core release callback: frees the umc_dev allocated in
 * umc_device_create() once the last reference is dropped. */
static void umc_device_release(struct device *dev)
{
	struct umc_dev *umc = to_umc_dev(dev);

	kfree(umc);
}
19
20/**
21 * umc_device_create - allocate a child UMC device
22 * @parent: parent of the new UMC device.
23 * @n: index of the new device.
24 *
25 * The new UMC device will have a bus ID of the parent with '-n'
26 * appended.
27 */
28struct umc_dev *umc_device_create(struct device *parent, int n)
29{
30 struct umc_dev *umc;
31
32 umc = kzalloc(sizeof(struct umc_dev), GFP_KERNEL);
33 if (umc) {
34 snprintf(umc->dev.bus_id, sizeof(umc->dev.bus_id), "%s-%d",
35 parent->bus_id, n);
36 umc->dev.parent = parent;
37 umc->dev.bus = &umc_bus_type;
38 umc->dev.release = umc_device_release;
39
40 umc->dev.dma_mask = parent->dma_mask;
41 }
42 return umc;
43}
44EXPORT_SYMBOL_GPL(umc_device_create);
45
/**
 * umc_device_register - register a UMC device
 * @umc: pointer to the UMC device
 *
 * The memory resource for the UMC device is acquired and the device
 * registered with the system.  On failure of either step, previously
 * acquired state is unwound via the goto ladder.
 */
int umc_device_register(struct umc_dev *umc)
{
	int err;

	d_fnstart(3, &umc->dev, "(umc_dev %p)\n", umc);

	/* Claim our slice of the parent resource before going live. */
	err = request_resource(umc->resource.parent, &umc->resource);
	if (err < 0) {
		dev_err(&umc->dev, "can't allocate resource range "
			"%016Lx to %016Lx: %d\n",
			(unsigned long long)umc->resource.start,
			(unsigned long long)umc->resource.end,
			err);
		goto error_request_resource;
	}

	err = device_register(&umc->dev);
	if (err < 0)
		goto error_device_register;
	d_fnend(3, &umc->dev, "(umc_dev %p) = 0\n", umc);
	return 0;

error_device_register:
	release_resource(&umc->resource);
error_request_resource:
	d_fnend(3, &umc->dev, "(umc_dev %p) = %d\n", umc, err);
	return err;
}
EXPORT_SYMBOL_GPL(umc_device_register);
82
/**
 * umc_device_unregister - unregister a UMC device
 * @umc: pointer to the UMC device
 *
 * First we unregister the device, make sure the driver can do its
 * resource release thing and then we try to release any left over
 * resources. We take a ref to the device, to make sure it doesn't
 * disappear under our feet.
 */
void umc_device_unregister(struct umc_dev *umc)
{
	struct device *dev;
	if (!umc)
		return;
	dev = get_device(&umc->dev);
	d_fnstart(3, dev, "(umc_dev %p)\n", umc);
	device_unregister(&umc->dev);
	release_resource(&umc->resource);
	d_fnend(3, dev, "(umc_dev %p) = void\n", umc);
	put_device(dev);
}
EXPORT_SYMBOL_GPL(umc_device_unregister);
diff --git a/drivers/uwb/umc-drv.c b/drivers/uwb/umc-drv.c
new file mode 100644
index 000000000000..367b5eb85d60
--- /dev/null
+++ b/drivers/uwb/umc-drv.c
@@ -0,0 +1,31 @@
1/*
2 * UWB Multi-interface Controller driver management.
3 *
4 * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
5 *
6 * This file is released under the GNU GPL v2.
7 */
8#include <linux/kernel.h>
9#include <linux/uwb/umc.h>
10
/*
 * Register a UMC capability driver on the UMC bus.  Called via the
 * umc_driver_register() wrapper, which supplies @module and @mod_name.
 */
int __umc_driver_register(struct umc_driver *umc_drv, struct module *module,
			  const char *mod_name)
{
	umc_drv->driver.name     = umc_drv->name;
	umc_drv->driver.owner    = module;
	umc_drv->driver.mod_name = mod_name;
	umc_drv->driver.bus      = &umc_bus_type;

	return driver_register(&umc_drv->driver);
}
EXPORT_SYMBOL_GPL(__umc_driver_register);
22
/**
 * umc_driver_unregister - unregister a UMC capability driver.
 * @umc_drv: pointer to the driver.
 */
void umc_driver_unregister(struct umc_driver *umc_drv)
{
	driver_unregister(&umc_drv->driver);
}
EXPORT_SYMBOL_GPL(umc_driver_unregister);
diff --git a/drivers/uwb/uwb-debug.c b/drivers/uwb/uwb-debug.c
new file mode 100644
index 000000000000..6d232c35d07d
--- /dev/null
+++ b/drivers/uwb/uwb-debug.c
@@ -0,0 +1,367 @@
1/*
2 * Ultra Wide Band
3 * Debug support
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * FIXME: doc
24 */
25
26#include <linux/spinlock.h>
27#include <linux/module.h>
28#include <linux/slab.h>
29#include <linux/notifier.h>
30#include <linux/device.h>
31#include <linux/debugfs.h>
32#include <linux/uaccess.h>
33#include <linux/seq_file.h>
34
35#include <linux/uwb/debug-cmd.h>
36#define D_LOCAL 0
37#include <linux/uwb/debug.h>
38
39#include "uwb-internal.h"
40
/*
 * Hex-dump @rsize bytes starting at @_buf, eight bytes per output
 * line.  Lines go through dev_info() when @dev is non-NULL, otherwise
 * through printk(KERN_INFO).
 */
void dump_bytes(struct device *dev, const void *_buf, size_t rsize)
{
	const char *buf = _buf;
	char line[32];
	size_t offset = 0;
	int cnt, cnt2;
	for (cnt = 0; cnt < rsize; cnt += 8) {
		/* rtop: number of bytes on this line (<= 8, tail may be short) */
		size_t rtop = rsize - cnt < 8 ? rsize - cnt : 8;
		for (offset = cnt2 = 0; cnt2 < rtop; cnt2++) {
			offset += scnprintf(line + offset, sizeof(line) - offset,
					    "%02x ", buf[cnt + cnt2] & 0xff);
		}
		if (dev)
			dev_info(dev, "%s\n", line);
		else
			printk(KERN_INFO "%s\n", line);
	}
}
EXPORT_SYMBOL_GPL(dump_bytes);
60
61/*
62 * Debug interface
63 *
64 * Per radio controller debugfs files (in uwb/uwbN/):
65 *
66 * command: Flexible command interface (see <linux/uwb/debug-cmd.h>).
67 *
68 * reservations: information on reservations.
69 *
70 * accept: Set to true (Y or 1) to accept reservation requests from
71 * peers.
72 *
73 * drp_avail: DRP availability information.
74 */
75
/* Per-radio-controller debug interface state (rc->dbg). */
struct uwb_dbg {
	struct uwb_pal pal;		/* debug PAL; receives new reservation callbacks */

	u32 accept;			/* non-zero: accept peer reservation requests */
	struct list_head rsvs;		/* reservations created via the command file */

	struct dentry *root_d;		/* debugfs uwb/uwbN/ directory */
	struct dentry *command_f;	/* "command" file (write-only) */
	struct dentry *reservations_f;	/* "reservations" file (read-only) */
	struct dentry *accept_f;	/* "accept" boolean file */
	struct dentry *drp_avail_f;	/* "drp_avail" file (read-only) */
};
88
89static struct dentry *root_dir;
90
/*
 * Reservation state-change callback for debug-created reservations:
 * logs owner, target and new state via dev_dbg().
 */
static void uwb_dbg_rsv_cb(struct uwb_rsv *rsv)
{
	struct uwb_rc *rc = rsv->rc;
	struct device *dev = &rc->uwb_dev.dev;
	struct uwb_dev_addr devaddr;
	char owner[UWB_ADDR_STRSIZE], target[UWB_ADDR_STRSIZE];

	uwb_dev_addr_print(owner, sizeof(owner), &rsv->owner->dev_addr);
	if (rsv->target.type == UWB_RSV_TARGET_DEV)
		devaddr = rsv->target.dev->dev_addr;
	else
		devaddr = rsv->target.devaddr;
	uwb_dev_addr_print(target, sizeof(target), &devaddr);

	dev_dbg(dev, "debug: rsv %s -> %s: %s\n",
		owner, target, uwb_rsv_state_str(rsv->state));
}
108
/*
 * Debug command: create and establish a reservation to the device
 * whose MAC address is given in @cmd.  On success the reservation is
 * added to the debug interface's list for later termination.
 *
 * NOTE(review): the reference taken by uwb_dev_get_by_macaddr() is
 * dropped explicitly only on the allocation-failure path; on the other
 * paths it is presumably owned/released by the reservation — confirm
 * against uwb_rsv_destroy()/uwb_rsv_establish().
 */
static int cmd_rsv_establish(struct uwb_rc *rc,
			     struct uwb_dbg_cmd_rsv_establish *cmd)
{
	struct uwb_mac_addr macaddr;
	struct uwb_rsv *rsv;
	struct uwb_dev *target;
	int ret;

	memcpy(&macaddr, cmd->target, sizeof(macaddr));
	target = uwb_dev_get_by_macaddr(rc, &macaddr);
	if (target == NULL)
		return -ENODEV;

	rsv = uwb_rsv_create(rc, uwb_dbg_rsv_cb, NULL);
	if (rsv == NULL) {
		uwb_dev_put(target);
		return -ENOMEM;
	}

	rsv->owner       = &rc->uwb_dev;
	rsv->target.type = UWB_RSV_TARGET_DEV;
	rsv->target.dev  = target;
	rsv->type        = cmd->type;
	rsv->max_mas     = cmd->max_mas;
	rsv->min_mas     = cmd->min_mas;
	rsv->sparsity    = cmd->sparsity;

	ret = uwb_rsv_establish(rsv);
	if (ret)
		uwb_rsv_destroy(rsv);
	else
		list_add_tail(&rsv->pal_node, &rc->dbg->rsvs);

	return ret;
}
144
145static int cmd_rsv_terminate(struct uwb_rc *rc,
146 struct uwb_dbg_cmd_rsv_terminate *cmd)
147{
148 struct uwb_rsv *rsv, *found = NULL;
149 int i = 0;
150
151 list_for_each_entry(rsv, &rc->dbg->rsvs, pal_node) {
152 if (i == cmd->index) {
153 found = rsv;
154 break;
155 }
156 }
157 if (!found)
158 return -EINVAL;
159
160 list_del(&found->pal_node);
161 uwb_rsv_terminate(found);
162
163 return 0;
164}
165
/* debugfs open: stash the uwb_rc (stored in i_private) for write(). */
static int command_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;

	return 0;
}
172
/*
 * debugfs "command" write handler: accepts exactly one binary
 * struct uwb_dbg_cmd per write (see <linux/uwb/debug-cmd.h>) and
 * dispatches on its type.  Returns the full length on success.
 */
static ssize_t command_write(struct file *file, const char __user *buf,
			     size_t len, loff_t *off)
{
	struct uwb_rc *rc = file->private_data;
	struct uwb_dbg_cmd cmd;
	int ret;

	/* Partial or oversized writes are rejected outright. */
	if (len != sizeof(struct uwb_dbg_cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, buf, len) != 0)
		return -EFAULT;

	switch (cmd.type) {
	case UWB_DBG_CMD_RSV_ESTABLISH:
		ret = cmd_rsv_establish(rc, &cmd.rsv_establish);
		break;
	case UWB_DBG_CMD_RSV_TERMINATE:
		ret = cmd_rsv_terminate(rc, &cmd.rsv_terminate);
		break;
	default:
		return -EINVAL;
	}

	return ret < 0 ? ret : len;
}
199
/* File operations for the write-only debugfs "command" file. */
static struct file_operations command_fops = {
	.open	= command_open,
	.write  = command_write,
	.read	= NULL,
	.llseek = no_llseek,
	.owner	= THIS_MODULE,
};
207
/*
 * seq_file show handler for the "reservations" debugfs file.  For each
 * reservation prints: owner/target role ('O' if we own it, 'T' if we
 * are the target), addresses, state, stream and type, plus the MAS
 * bitmap — all under rsvs_mutex.
 */
static int reservations_print(struct seq_file *s, void *p)
{
	struct uwb_rc *rc = s->private;
	struct uwb_rsv *rsv;

	mutex_lock(&rc->rsvs_mutex);

	list_for_each_entry(rsv, &rc->reservations, rc_node) {
		struct uwb_dev_addr devaddr;
		char owner[UWB_ADDR_STRSIZE], target[UWB_ADDR_STRSIZE];
		bool is_owner;
		char buf[72];

		uwb_dev_addr_print(owner, sizeof(owner), &rsv->owner->dev_addr);
		if (rsv->target.type == UWB_RSV_TARGET_DEV) {
			devaddr = rsv->target.dev->dev_addr;
			is_owner = &rc->uwb_dev == rsv->owner;
		} else {
			/* multicast/devaddr target: we are always the owner */
			devaddr = rsv->target.devaddr;
			is_owner = true;
		}
		uwb_dev_addr_print(target, sizeof(target), &devaddr);

		seq_printf(s, "%c %s -> %s: %s\n",
			   is_owner ? 'O' : 'T',
			   owner, target, uwb_rsv_state_str(rsv->state));
		seq_printf(s, "  stream: %d  type: %s\n",
			   rsv->stream, uwb_rsv_type_str(rsv->type));
		bitmap_scnprintf(buf, sizeof(buf), rsv->mas.bm, UWB_NUM_MAS);
		seq_printf(s, "  %s\n", buf);
	}

	mutex_unlock(&rc->rsvs_mutex);

	return 0;
}
244
/* debugfs open: bind reservations_print() as a single-shot seq_file. */
static int reservations_open(struct inode *inode, struct file *file)
{
	return single_open(file, reservations_print, inode->i_private);
}
249
/* File operations for the read-only debugfs "reservations" file. */
static struct file_operations reservations_fops = {
	.open    = reservations_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
	.owner   = THIS_MODULE,
};
257
/*
 * seq_file show handler for the "drp_avail" debugfs file: dumps the
 * global, local and pending DRP availability bitmaps.
 */
static int drp_avail_print(struct seq_file *s, void *p)
{
	struct uwb_rc *rc = s->private;
	char buf[72];

	bitmap_scnprintf(buf, sizeof(buf), rc->drp_avail.global, UWB_NUM_MAS);
	seq_printf(s, "global:  %s\n", buf);
	bitmap_scnprintf(buf, sizeof(buf), rc->drp_avail.local, UWB_NUM_MAS);
	seq_printf(s, "local:   %s\n", buf);
	bitmap_scnprintf(buf, sizeof(buf), rc->drp_avail.pending, UWB_NUM_MAS);
	seq_printf(s, "pending: %s\n", buf);

	return 0;
}
272
/* debugfs open: bind drp_avail_print() as a single-shot seq_file. */
static int drp_avail_open(struct inode *inode, struct file *file)
{
	return single_open(file, drp_avail_print, inode->i_private);
}
277
/* File operations for the read-only debugfs "drp_avail" file. */
static struct file_operations drp_avail_fops = {
	.open    = drp_avail_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
	.owner   = THIS_MODULE,
};
285
/*
 * PAL new-reservation callback: accept incoming reservation requests
 * when the debugfs "accept" flag is set.
 */
static void uwb_dbg_new_rsv(struct uwb_rsv *rsv)
{
	struct uwb_rc *rc = rsv->rc;

	if (rc->dbg->accept)
		uwb_rsv_accept(rsv, uwb_dbg_rsv_cb, NULL);
}
293
/**
 * uwb_dbg_add_rc - add a debug interface for a radio controller
 * @rc: the radio controller
 *
 * On allocation failure the debug interface is silently disabled
 * (rc->dbg stays NULL and uwb_dbg_del_rc() becomes a no-op).  The
 * debugfs files are created only if the top-level "uwb" directory was
 * successfully created in uwb_dbg_init().
 */
void uwb_dbg_add_rc(struct uwb_rc *rc)
{
	rc->dbg = kzalloc(sizeof(struct uwb_dbg), GFP_KERNEL);
	if (rc->dbg == NULL)
		return;

	INIT_LIST_HEAD(&rc->dbg->rsvs);

	uwb_pal_init(&rc->dbg->pal);
	rc->dbg->pal.new_rsv = uwb_dbg_new_rsv;
	uwb_pal_register(rc, &rc->dbg->pal);
	if (root_dir) {
		rc->dbg->root_d = debugfs_create_dir(dev_name(&rc->uwb_dev.dev),
						     root_dir);
		rc->dbg->command_f = debugfs_create_file("command", 0200,
							 rc->dbg->root_d, rc,
							 &command_fops);
		rc->dbg->reservations_f = debugfs_create_file("reservations", 0444,
							      rc->dbg->root_d, rc,
							      &reservations_fops);
		rc->dbg->accept_f = debugfs_create_bool("accept", 0644,
							rc->dbg->root_d,
							&rc->dbg->accept);
		rc->dbg->drp_avail_f = debugfs_create_file("drp_avail", 0444,
							   rc->dbg->root_d, rc,
							   &drp_avail_fops);
	}
}
326
327/**
328 * uwb_dbg_add_rc - remove a radio controller's debug interface
329 * @rc: the radio controller
330 */
331void uwb_dbg_del_rc(struct uwb_rc *rc)
332{
333 struct uwb_rsv *rsv, *t;
334
335 if (rc->dbg == NULL)
336 return;
337
338 list_for_each_entry_safe(rsv, t, &rc->dbg->rsvs, pal_node) {
339 uwb_rsv_destroy(rsv);
340 }
341
342 uwb_pal_unregister(rc, &rc->dbg->pal);
343
344 if (root_dir) {
345 debugfs_remove(rc->dbg->drp_avail_f);
346 debugfs_remove(rc->dbg->accept_f);
347 debugfs_remove(rc->dbg->reservations_f);
348 debugfs_remove(rc->dbg->command_f);
349 debugfs_remove(rc->dbg->root_d);
350 }
351}
352
/**
 * uwb_dbg_init - initialize the debug interface sub-module
 *
 * Creates the top-level "uwb" debugfs directory; per-controller files
 * hang off it via uwb_dbg_add_rc().
 */
void uwb_dbg_init(void)
{
	root_dir = debugfs_create_dir("uwb", NULL);
}
360
/**
 * uwb_dbg_exit - clean-up the debug interface sub-module
 */
void uwb_dbg_exit(void)
{
	debugfs_remove(root_dir);
}
diff --git a/drivers/uwb/uwb-internal.h b/drivers/uwb/uwb-internal.h
new file mode 100644
index 000000000000..2ad307d12961
--- /dev/null
+++ b/drivers/uwb/uwb-internal.h
@@ -0,0 +1,305 @@
1/*
2 * Ultra Wide Band
3 * UWB internal API
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 * This contains most of the internal API for UWB. This is stuff used
23 * across the stack that of course, is of no interest to the rest.
24 *
25 * Some parts might end up going public (like uwb_rc_*())...
26 */
27
28#ifndef __UWB_INTERNAL_H__
29#define __UWB_INTERNAL_H__
30
31#include <linux/version.h>
32#include <linux/kernel.h>
33#include <linux/device.h>
34#include <linux/uwb.h>
35#include <linux/mutex.h>
36
37struct uwb_beca_e;
38
39/* General device API */
40extern void uwb_dev_init(struct uwb_dev *uwb_dev);
41extern int __uwb_dev_offair(struct uwb_dev *, struct uwb_rc *);
42extern int uwb_dev_add(struct uwb_dev *uwb_dev, struct device *parent_dev,
43 struct uwb_rc *parent_rc);
44extern void uwb_dev_rm(struct uwb_dev *uwb_dev);
45extern void uwbd_dev_onair(struct uwb_rc *, struct uwb_beca_e *);
46extern void uwbd_dev_offair(struct uwb_beca_e *);
47void uwb_notify(struct uwb_rc *rc, struct uwb_dev *uwb_dev, enum uwb_notifs event);
48
49/* General UWB Radio Controller Internal API */
50extern struct uwb_rc *__uwb_rc_try_get(struct uwb_rc *);
/* Take a reference on a radio controller via its embedded uwb_dev. */
static inline struct uwb_rc *__uwb_rc_get(struct uwb_rc *rc)
{
	uwb_dev_get(&rc->uwb_dev);
	return rc;
}

/* Drop the reference taken by __uwb_rc_get(). */
static inline void __uwb_rc_put(struct uwb_rc *rc)
{
	uwb_dev_put(&rc->uwb_dev);
}
61
62extern int uwb_rc_reset(struct uwb_rc *rc);
63extern int uwb_rc_beacon(struct uwb_rc *rc,
64 int channel, unsigned bpst_offset);
65extern int uwb_rc_scan(struct uwb_rc *rc,
66 unsigned channel, enum uwb_scan_type type,
67 unsigned bpst_offset);
68extern int uwb_rc_send_all_drp_ie(struct uwb_rc *rc);
69extern ssize_t uwb_rc_print_IEs(struct uwb_rc *rc, char *, size_t);
70extern void uwb_rc_ie_init(struct uwb_rc *);
71extern void uwb_rc_ie_init(struct uwb_rc *);
72extern ssize_t uwb_rc_ie_setup(struct uwb_rc *);
73extern void uwb_rc_ie_release(struct uwb_rc *);
74extern int uwb_rc_ie_add(struct uwb_rc *,
75 const struct uwb_ie_hdr *, size_t);
76extern int uwb_rc_ie_rm(struct uwb_rc *, enum uwb_ie);
77
78extern const char *uwb_rc_strerror(unsigned code);
79
80/*
81 * Time to wait for a response to an RC command.
82 *
83 * Some commands can take a long time to response. e.g., START_BEACON
84 * may scan for several superframes before joining an existing beacon
85 * group and this can take around 600 ms.
86 */
87#define UWB_RC_CMD_TIMEOUT_MS 1000 /* ms */
88
89/*
90 * Notification/Event Handlers
91 */
92
93struct uwb_rc_neh;
94
95void uwb_rc_neh_create(struct uwb_rc *rc);
96void uwb_rc_neh_destroy(struct uwb_rc *rc);
97
98struct uwb_rc_neh *uwb_rc_neh_add(struct uwb_rc *rc, struct uwb_rccb *cmd,
99 u8 expected_type, u16 expected_event,
100 uwb_rc_cmd_cb_f cb, void *arg);
101void uwb_rc_neh_rm(struct uwb_rc *rc, struct uwb_rc_neh *neh);
102void uwb_rc_neh_arm(struct uwb_rc *rc, struct uwb_rc_neh *neh);
103void uwb_rc_neh_put(struct uwb_rc_neh *neh);
104
105/* Event size tables */
106extern int uwb_est_create(void);
107extern void uwb_est_destroy(void);
108
109
110/*
111 * UWB Events & management daemon
112 */
113
114/**
115 * enum uwb_event_type - types of UWB management daemon events
116 *
117 * The UWB management daemon (uwbd) can receive two types of events:
118 * UWB_EVT_TYPE_NOTIF - notification from the radio controller.
119 * UWB_EVT_TYPE_MSG - a simple message.
120 */
121enum uwb_event_type {
122 UWB_EVT_TYPE_NOTIF,
123 UWB_EVT_TYPE_MSG,
124};
125
126/**
127 * struct uwb_event_notif - an event for a radio controller notification
128 * @size: Size of the buffer (ie: Guaranteed to contain at least
129 * a full 'struct uwb_rceb')
130 * @rceb: Pointer to a kmalloced() event payload
131 */
132struct uwb_event_notif {
133 size_t size;
134 struct uwb_rceb *rceb;
135};
136
137/**
138 * enum uwb_event_message - an event for a message for asynchronous processing
139 *
140 * UWB_EVT_MSG_RESET - reset the radio controller and all PAL hardware.
141 */
142enum uwb_event_message {
143 UWB_EVT_MSG_RESET,
144};
145
/**
 * struct uwb_event - an event queued for the UWB daemon
 * @list_node: link in the daemon's event queue
 * @rc: Radio controller that emitted the event (referenced)
 * @ts_jiffies: Timestamp, when was it received
 * @type: This event's type; selects which union member is valid
 * @notif: payload when @type == UWB_EVT_TYPE_NOTIF
 * @message: payload when @type == UWB_EVT_TYPE_MSG
 */
struct uwb_event {
	struct list_head list_node;
	struct uwb_rc	*rc;
	unsigned long	ts_jiffies;
	enum uwb_event_type type;
	union {
		struct uwb_event_notif notif;
		enum uwb_event_message message;
	};
};
162
163extern void uwbd_start(void);
164extern void uwbd_stop(void);
165extern struct uwb_event *uwb_event_alloc(size_t, gfp_t gfp_mask);
166extern void uwbd_event_queue(struct uwb_event *);
167void uwbd_flush(struct uwb_rc *rc);
168
169/* UWB event handlers */
170extern int uwbd_evt_handle_rc_beacon(struct uwb_event *);
171extern int uwbd_evt_handle_rc_beacon_size(struct uwb_event *);
172extern int uwbd_evt_handle_rc_bpoie_change(struct uwb_event *);
173extern int uwbd_evt_handle_rc_bp_slot_change(struct uwb_event *);
174extern int uwbd_evt_handle_rc_drp(struct uwb_event *);
175extern int uwbd_evt_handle_rc_drp_avail(struct uwb_event *);
176
177int uwbd_msg_handle_reset(struct uwb_event *evt);
178
179
180/*
181 * Address management
182 */
183int uwb_rc_dev_addr_assign(struct uwb_rc *rc);
184int uwbd_evt_handle_rc_dev_addr_conflict(struct uwb_event *evt);
185
186/*
187 * UWB Beacon Cache
188 *
189 * Each beacon we received is kept in a cache--when we receive that
190 * beacon consistently, that means there is a new device that we have
191 * to add to the system.
192 */
193
194extern unsigned long beacon_timeout_ms;
195
/** Beacon cache list: all cached beacon entries, their count, and the
 * mutex protecting the list. */
struct uwb_beca {
	struct list_head list;
	size_t entries;
	struct mutex mutex;
};
202
203extern struct uwb_beca uwb_beca;
204
205/**
206 * Beacon cache entry
207 *
208 * @jiffies_refresh: last time a beacon was received that refreshed
209 * this cache entry.
210 * @uwb_dev: device connected to this beacon. This pointer is not
211 * safe, you need to get it with uwb_dev_try_get()
212 *
213 * @hits: how many time we have seen this beacon since last time we
214 * cleared it
215 */
216struct uwb_beca_e {
217 struct mutex mutex;
218 struct kref refcnt;
219 struct list_head node;
220 struct uwb_mac_addr *mac_addr;
221 struct uwb_dev_addr dev_addr;
222 u8 hits;
223 unsigned long ts_jiffies;
224 struct uwb_dev *uwb_dev;
225 struct uwb_rc_evt_beacon *be;
226 struct stats lqe_stats, rssi_stats; /* radio statistics */
227};
228struct uwb_beacon_frame;
229extern ssize_t uwb_bce_print_IEs(struct uwb_dev *, struct uwb_beca_e *,
230 char *, size_t);
231extern struct uwb_beca_e *__uwb_beca_add(struct uwb_rc_evt_beacon *,
232 struct uwb_beacon_frame *,
233 unsigned long);
234
235extern void uwb_bce_kfree(struct kref *_bce);
/* Take a reference on a beacon cache entry. */
static inline void uwb_bce_get(struct uwb_beca_e *bce)
{
	kref_get(&bce->refcnt);
}
/* Drop a reference; frees the entry via uwb_bce_kfree() at zero. */
static inline void uwb_bce_put(struct uwb_beca_e *bce)
{
	kref_put(&bce->refcnt, uwb_bce_kfree);
}
244extern void uwb_beca_purge(void);
245extern void uwb_beca_release(void);
246
247struct uwb_dev *uwb_dev_get_by_devaddr(struct uwb_rc *rc,
248 const struct uwb_dev_addr *devaddr);
249struct uwb_dev *uwb_dev_get_by_macaddr(struct uwb_rc *rc,
250 const struct uwb_mac_addr *macaddr);
251
252/* -- UWB Sysfs representation */
253extern struct class uwb_rc_class;
254extern struct device_attribute dev_attr_mac_address;
255extern struct device_attribute dev_attr_beacon;
256extern struct device_attribute dev_attr_scan;
257
258/* -- DRP Bandwidth allocator: bandwidth allocations, reservations, DRP */
259void uwb_rsv_init(struct uwb_rc *rc);
260int uwb_rsv_setup(struct uwb_rc *rc);
261void uwb_rsv_cleanup(struct uwb_rc *rc);
262
263void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state);
264void uwb_rsv_remove(struct uwb_rsv *rsv);
265struct uwb_rsv *uwb_rsv_find(struct uwb_rc *rc, struct uwb_dev *src,
266 struct uwb_ie_drp *drp_ie);
267void uwb_rsv_sched_update(struct uwb_rc *rc);
268
269void uwb_drp_handle_timeout(struct uwb_rsv *rsv);
270int uwb_drp_ie_update(struct uwb_rsv *rsv);
271void uwb_drp_ie_to_bm(struct uwb_mas_bm *bm, const struct uwb_ie_drp *drp_ie);
272
273void uwb_drp_avail_init(struct uwb_rc *rc);
274int uwb_drp_avail_reserve_pending(struct uwb_rc *rc, struct uwb_mas_bm *mas);
275void uwb_drp_avail_reserve(struct uwb_rc *rc, struct uwb_mas_bm *mas);
276void uwb_drp_avail_release(struct uwb_rc *rc, struct uwb_mas_bm *mas);
277void uwb_drp_avail_ie_update(struct uwb_rc *rc);
278
279/* -- PAL support */
280void uwb_rc_pal_init(struct uwb_rc *rc);
281
282/* -- Misc */
283
284extern ssize_t uwb_mac_frame_hdr_print(char *, size_t,
285 const struct uwb_mac_frame_hdr *);
286
287/* -- Debug interface */
288void uwb_dbg_init(void);
289void uwb_dbg_exit(void);
290void uwb_dbg_add_rc(struct uwb_rc *rc);
291void uwb_dbg_del_rc(struct uwb_rc *rc);
292
293/* Workarounds for version specific stuff */
294
/* Lock a UWB device via its embedded struct device's semaphore
 * (version-specific workaround; see "Workarounds" note above). */
static inline void uwb_dev_lock(struct uwb_dev *uwb_dev)
{
	down(&uwb_dev->dev.sem);
}

/* Unlock a UWB device locked with uwb_dev_lock(). */
static inline void uwb_dev_unlock(struct uwb_dev *uwb_dev)
{
	up(&uwb_dev->dev.sem);
}
304
305#endif /* #ifndef __UWB_INTERNAL_H__ */
diff --git a/drivers/uwb/uwbd.c b/drivers/uwb/uwbd.c
new file mode 100644
index 000000000000..78908416e42c
--- /dev/null
+++ b/drivers/uwb/uwbd.c
@@ -0,0 +1,410 @@
1/*
2 * Ultra Wide Band
3 * Neighborhood Management Daemon
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
 * This daemon takes care of maintaining information that describes the
24 * UWB neighborhood that the radios in this machine can see. It also
25 * keeps a tab of which devices are visible, makes sure each HC sits
26 * on a different channel to avoid interfering, etc.
27 *
28 * Different drivers (radio controller, device, any API in general)
29 * communicate with this daemon through an event queue. Daemon wakes
30 * up, takes a list of events and handles them one by one; handling
31 * function is extracted from a table based on the event's type and
32 * subtype. Events are freed only if the handling function says so.
33 *
34 * . Lock protecting the event list has to be an spinlock and locked
35 * with IRQSAVE because it might be called from an interrupt
36 * context (ie: when events arrive and the notification drops
37 * down from the ISR).
38 *
39 * . UWB radio controller drivers queue events to the daemon using
40 * uwbd_event_queue(). They just get the event, chew it to make it
41 * look like UWBD likes it and pass it in a buffer allocated with
42 * uwb_event_alloc().
43 *
44 * EVENTS
45 *
 * Events have a type, a subtype, a length, some other stuff and the
47 * data blob, which depends on the event. The header is 'struct
48 * uwb_event'; for payloads, see 'struct uwbd_evt_*'.
49 *
50 * EVENT HANDLER TABLES
51 *
52 * To find a handling function for an event, the type is used to index
53 * a subtype-table in the type-table. The subtype-table is indexed
54 * with the subtype to get the function that handles the event. Start
55 * with the main type-table 'uwbd_evt_type_handler'.
56 *
57 * DEVICES
58 *
 * Devices are created when a bunch of beacons have been received and
 * it is established that the device has stable radio presence. CREATED
 * only, not configured. Devices are ONLY configured when an
 * Application-Specific IE Probe is received, in which the device
 * declares which Protocol ID it groks. Then the device is CONFIGURED
64 * (and the driver->probe() stuff of the device model is invoked).
65 *
66 * Devices are considered disconnected when a certain number of
67 * beacons are not received in an amount of time.
68 *
69 * Handler functions are called normally uwbd_evt_handle_*().
70 */
71
72#include <linux/kthread.h>
73#include <linux/module.h>
74#include <linux/freezer.h>
75#include "uwb-internal.h"
76
77#define D_LOCAL 1
78#include <linux/uwb/debug.h>
79
80
81/**
82 * UWBD Event handler function signature
83 *
84 * Return !0 if the event needs not to be freed (ie the handler
85 * takes/took care of it). 0 means the daemon code will free the
86 * event.
87 *
88 * @evt->rc is already referenced and guaranteed to exist. See
89 * uwb_evt_handle().
90 */
91typedef int (*uwbd_evt_handler_f)(struct uwb_event *);
92
/**
 * Properties of a UWBD event
 *
 * @handler: the function that will handle this event
 * @name: text name of event
 */
struct uwbd_event {
	uwbd_evt_handler_f handler;
	const char *name;
};

/** Table of handlers for and properties of the UWBD Radio Control Events.
 * Indexed by the event subtype (wEvent); gaps are zero-filled, so a
 * NULL handler means "unhandled subtype". */
static
struct uwbd_event uwbd_events[] = {
	[UWB_RC_EVT_BEACON] = {
		.handler = uwbd_evt_handle_rc_beacon,
		.name = "BEACON_RECEIVED"
	},
	[UWB_RC_EVT_BEACON_SIZE] = {
		.handler = uwbd_evt_handle_rc_beacon_size,
		.name = "BEACON_SIZE_CHANGE"
	},
	[UWB_RC_EVT_BPOIE_CHANGE] = {
		.handler = uwbd_evt_handle_rc_bpoie_change,
		.name = "BPOIE_CHANGE"
	},
	[UWB_RC_EVT_BP_SLOT_CHANGE] = {
		.handler = uwbd_evt_handle_rc_bp_slot_change,
		.name = "BP_SLOT_CHANGE"
	},
	[UWB_RC_EVT_DRP_AVAIL] = {
		.handler = uwbd_evt_handle_rc_drp_avail,
		.name = "DRP_AVAILABILITY_CHANGE"
	},
	[UWB_RC_EVT_DRP] = {
		.handler = uwbd_evt_handle_rc_drp,
		.name = "DRP"
	},
	[UWB_RC_EVT_DEV_ADDR_CONFLICT] = {
		.handler = uwbd_evt_handle_rc_dev_addr_conflict,
		.name = "DEV_ADDR_CONFLICT",
	},
};


/* Per-event-type entry: a subtype -> uwbd_event table plus metadata. */
struct uwbd_evt_type_handler {
	const char *name;		/* type name, for diagnostics */
	struct uwbd_event *uwbd_events;	/* subtype-indexed handler table */
	size_t size;			/* number of entries in the table */
};

/* Build a uwbd_evt_type_handler initializer from a handler array. */
#define UWBD_EVT_TYPE_HANDLER(n,a) {		\
	.name = (n),				\
	.uwbd_events = (a),			\
	.size = sizeof(a)/sizeof((a)[0])	\
}


/** Table of handlers for each UWBD Event type. */
static
struct uwbd_evt_type_handler uwbd_evt_type_handlers[] = {
	[UWB_RC_CET_GENERAL] = UWBD_EVT_TYPE_HANDLER("RC", uwbd_events)
};

static const
size_t uwbd_evt_type_handlers_len =
	sizeof(uwbd_evt_type_handlers) / sizeof(uwbd_evt_type_handlers[0]);

/* Handlers for internal messages (UWB_EVT_TYPE_MSG events), indexed by
 * evt->message. */
static const struct uwbd_event uwbd_message_handlers[] = {
	[UWB_EVT_MSG_RESET] = {
		.handler = uwbd_msg_handle_reset,
		.name = "reset",
	},
};

/* Serializes event processing in uwbd() against uwbd_flush(). */
static DEFINE_MUTEX(uwbd_event_mutex);
170
171/**
172 * Handle an URC event passed to the UWB Daemon
173 *
174 * @evt: the event to handle
175 * @returns: 0 if the event can be kfreed, !0 on the contrary
176 * (somebody else took ownership) [coincidentally, returning
177 * a <0 errno code will free it :)].
178 *
179 * Looks up the two indirection tables (one for the type, one for the
180 * subtype) to decide which function handles it and then calls the
181 * handler.
182 *
183 * The event structure passed to the event handler has the radio
184 * controller in @evt->rc referenced. The reference will be dropped
185 * once the handler returns, so if it needs it for longer (async),
186 * it'll need to take another one.
187 */
188static
189int uwbd_event_handle_urc(struct uwb_event *evt)
190{
191 struct uwbd_evt_type_handler *type_table;
192 uwbd_evt_handler_f handler;
193 u8 type, context;
194 u16 event;
195
196 type = evt->notif.rceb->bEventType;
197 event = le16_to_cpu(evt->notif.rceb->wEvent);
198 context = evt->notif.rceb->bEventContext;
199
200 if (type > uwbd_evt_type_handlers_len) {
201 printk(KERN_ERR "UWBD: event type %u: unknown (too high)\n", type);
202 return -EINVAL;
203 }
204 type_table = &uwbd_evt_type_handlers[type];
205 if (type_table->uwbd_events == NULL) {
206 printk(KERN_ERR "UWBD: event type %u: unknown\n", type);
207 return -EINVAL;
208 }
209 if (event > type_table->size) {
210 printk(KERN_ERR "UWBD: event %s[%u]: unknown (too high)\n",
211 type_table->name, event);
212 return -EINVAL;
213 }
214 handler = type_table->uwbd_events[event].handler;
215 if (handler == NULL) {
216 printk(KERN_ERR "UWBD: event %s[%u]: unknown\n", type_table->name, event);
217 return -EINVAL;
218 }
219 return (*handler)(evt);
220}
221
/*
 * Dispatch an internal message event (evt->message) to its entry in
 * uwbd_message_handlers[], logging any failure.
 *
 * Called from uwbd_event_handle() with uwbd_event_mutex held by the
 * daemon loop.
 */
static void uwbd_event_handle_message(struct uwb_event *evt)
{
	struct uwb_rc *rc;
	int result;

	rc = evt->rc;

	/* Range-check the message before using it as a table index. */
	if (evt->message < 0 || evt->message >= ARRAY_SIZE(uwbd_message_handlers)) {
		dev_err(&rc->uwb_dev.dev, "UWBD: invalid message type %d\n", evt->message);
		return;
	}

	/* If this is a reset event we need to drop the
	 * uwbd_event_mutex or it deadlocks when the reset handler
	 * attempts to flush the uwbd events. */
	if (evt->message == UWB_EVT_MSG_RESET)
		mutex_unlock(&uwbd_event_mutex);

	result = uwbd_message_handlers[evt->message].handler(evt);
	if (result < 0)
		dev_err(&rc->uwb_dev.dev, "UWBD: '%s' message failed: %d\n",
			uwbd_message_handlers[evt->message].name, result);

	/* Re-take the mutex dropped above; the daemon loop unlocks it. */
	if (evt->message == UWB_EVT_MSG_RESET)
		mutex_lock(&uwbd_event_mutex);
}
248
249static void uwbd_event_handle(struct uwb_event *evt)
250{
251 struct uwb_rc *rc;
252 int should_keep;
253
254 rc = evt->rc;
255
256 if (rc->ready) {
257 switch (evt->type) {
258 case UWB_EVT_TYPE_NOTIF:
259 should_keep = uwbd_event_handle_urc(evt);
260 if (should_keep <= 0)
261 kfree(evt->notif.rceb);
262 break;
263 case UWB_EVT_TYPE_MSG:
264 uwbd_event_handle_message(evt);
265 break;
266 default:
267 dev_err(&rc->uwb_dev.dev, "UWBD: invalid event type %d\n", evt->type);
268 break;
269 }
270 }
271
272 __uwb_rc_put(rc); /* for the __uwb_rc_get() in uwb_rc_notif_cb() */
273}
274/* The UWB Daemon */
275
276
277/** Daemon's PID: used to decide if we can queue or not */
278static int uwbd_pid;
279/** Daemon's task struct for managing the kthread */
280static struct task_struct *uwbd_task;
281/** Daemon's waitqueue for waiting for new events */
282static DECLARE_WAIT_QUEUE_HEAD(uwbd_wq);
283/** Daemon's list of events; we queue/dequeue here */
284static struct list_head uwbd_event_list = LIST_HEAD_INIT(uwbd_event_list);
/** Daemon's list lock to protect concurrent access */
286static DEFINE_SPINLOCK(uwbd_event_list_lock);
287
288
/**
 * UWB Daemon
 *
 * Listens to all UWB notifications and takes care to track the state
 * of the UWB neighborhood for the kernel. When we do a run, we
 * spinlock, move the list to a private copy and release the
 * lock. Hold it as little as possible. Not a conflict: it is
 * guaranteed we own the events in the private list.
 *
 * FIXME: should change so we don't have a 1HZ timer all the time, but
 *        only if there are devices.
 */
static int uwbd(void *unused)
{
	unsigned long flags;
	struct list_head list = LIST_HEAD_INIT(list);
	struct uwb_event *evt, *nxt;
	int should_stop = 0;
	while (1) {
		/* Wake on new events, on kthread_stop(), or at worst
		 * every HZ jiffies to run the beacon-cache purge. */
		wait_event_interruptible_timeout(
			uwbd_wq,
			!list_empty(&uwbd_event_list)
			  || (should_stop = kthread_should_stop()),
			HZ);
		if (should_stop)
			break;
		try_to_freeze();

		/* Steal the whole queue under the spinlock; then process
		 * the private copy with only the mutex held (keeps the
		 * IRQ-safe lock hold time minimal). */
		mutex_lock(&uwbd_event_mutex);
		spin_lock_irqsave(&uwbd_event_list_lock, flags);
		list_splice_init(&uwbd_event_list, &list);
		spin_unlock_irqrestore(&uwbd_event_list_lock, flags);
		list_for_each_entry_safe(evt, nxt, &list, list_node) {
			list_del(&evt->list_node);
			uwbd_event_handle(evt);
			kfree(evt);
		}
		mutex_unlock(&uwbd_event_mutex);

		uwb_beca_purge();	/* Purge devices that left */
	}
	return 0;
}
332
333
334/** Start the UWB daemon */
335void uwbd_start(void)
336{
337 uwbd_task = kthread_run(uwbd, NULL, "uwbd");
338 if (uwbd_task == NULL)
339 printk(KERN_ERR "UWB: Cannot start management daemon; "
340 "UWB won't work\n");
341 else
342 uwbd_pid = uwbd_task->pid;
343}
344
345/* Stop the UWB daemon and free any unprocessed events */
346void uwbd_stop(void)
347{
348 unsigned long flags;
349 struct uwb_event *evt, *nxt;
350 kthread_stop(uwbd_task);
351 spin_lock_irqsave(&uwbd_event_list_lock, flags);
352 uwbd_pid = 0;
353 list_for_each_entry_safe(evt, nxt, &uwbd_event_list, list_node) {
354 if (evt->type == UWB_EVT_TYPE_NOTIF)
355 kfree(evt->notif.rceb);
356 kfree(evt);
357 }
358 spin_unlock_irqrestore(&uwbd_event_list_lock, flags);
359 uwb_beca_release();
360}
361
/*
 * Queue an event for the management daemon
 *
 * When some lower layer receives an event, it uses this function to
 * push it forward to the UWB daemon.
 *
 * Once you pass the event, you don't own it any more, but the daemon
 * does. It will uwb_event_free() it when done, so make sure you
 * uwb_event_alloc()ed it or bad things will happen.
 *
 * If the daemon is not running, we just free the event.
 *
 * NOTE(review): list_add() inserts at the head, so a burst of events
 * reaches the daemon in LIFO order; confirm whether FIFO ordering
 * (list_add_tail()) is required for correct event processing.
 */
void uwbd_event_queue(struct uwb_event *evt)
{
	unsigned long flags;
	spin_lock_irqsave(&uwbd_event_list_lock, flags);
	if (uwbd_pid != 0) {
		list_add(&evt->list_node, &uwbd_event_list);
		wake_up_all(&uwbd_wq);
	} else {
		/* Daemon gone: drop the rc ref taken by the caller and
		 * free the event plus its notification payload. */
		__uwb_rc_put(evt->rc);
		if (evt->type == UWB_EVT_TYPE_NOTIF)
			kfree(evt->notif.rceb);
		kfree(evt);
	}
	spin_unlock_irqrestore(&uwbd_event_list_lock, flags);
	return;
}
390
391void uwbd_flush(struct uwb_rc *rc)
392{
393 struct uwb_event *evt, *nxt;
394
395 mutex_lock(&uwbd_event_mutex);
396
397 spin_lock_irq(&uwbd_event_list_lock);
398 list_for_each_entry_safe(evt, nxt, &uwbd_event_list, list_node) {
399 if (evt->rc == rc) {
400 __uwb_rc_put(rc);
401 list_del(&evt->list_node);
402 if (evt->type == UWB_EVT_TYPE_NOTIF)
403 kfree(evt->notif.rceb);
404 kfree(evt);
405 }
406 }
407 spin_unlock_irq(&uwbd_event_list_lock);
408
409 mutex_unlock(&uwbd_event_mutex);
410}
diff --git a/drivers/uwb/whc-rc.c b/drivers/uwb/whc-rc.c
new file mode 100644
index 000000000000..1711deadb114
--- /dev/null
+++ b/drivers/uwb/whc-rc.c
@@ -0,0 +1,520 @@
1/*
2 * Wireless Host Controller: Radio Control Interface (WHCI v0.95[2.3])
3 * Radio Control command/event transport to the UWB stack
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * Initialize and hook up the Radio Control interface.
24 *
25 * For each device probed, creates an 'struct whcrc' which contains
26 * just the representation of the UWB Radio Controller, and the logic
27 * for reading notifications and passing them to the UWB Core.
28 *
29 * So we initialize all of those, register the UWB Radio Controller
30 * and setup the notification/event handle to pipe the notifications
31 * to the UWB management Daemon.
32 *
33 * Once uwb_rc_add() is called, the UWB stack takes control, resets
34 * the radio and readies the device to take commands the UWB
35 * API/user-space.
36 *
37 * Note this driver is just a transport driver; the commands are
38 * formed at the UWB stack and given to this driver who will deliver
39 * them to the hw and transfer the replies/notifications back to the
40 * UWB stack through the UWB daemon (UWBD).
41 */
42#include <linux/version.h>
43#include <linux/init.h>
44#include <linux/module.h>
45#include <linux/pci.h>
46#include <linux/dma-mapping.h>
47#include <linux/interrupt.h>
48#include <linux/workqueue.h>
49#include <linux/uwb.h>
50#include <linux/uwb/whci.h>
51#include <linux/uwb/umc.h>
52#include "uwb-internal.h"
53
54#define D_LOCAL 0
55#include <linux/uwb/debug.h>
56
/**
 * Descriptor for an instance of the UWB Radio Control Driver that
 * attaches to the URC interface of the WHCI PCI card.
 *
 * Unless there is a lock specific to the 'data members', all access
 * is protected by uwb_rc->mutex.
 */
struct whcrc {
	struct umc_dev *umc_dev;
	struct uwb_rc *uwb_rc; /* UWB host controller */

	unsigned long area;		/* start of the URC MMIO region */
	void __iomem *rc_base;		/* ioremapped URC registers */
	size_t rc_len;			/* length of the MMIO region */
	spinlock_t irq_lock;		/* serializes URCCMD read/modify/write */

	void *evt_buf, *cmd_buf;	/* DMA-coherent event/command pages */
	dma_addr_t evt_dma_buf, cmd_dma_buf;
	wait_queue_head_t cmd_wq;	/* woken on URCSTS_RCI (command done) */
	struct work_struct event_work;	/* bottom half for URCSTS_ER events */
};
78
/**
 * Execute an UWB RC command on WHCI/RC
 *
 * @rc:       Instance of a Radio Controller that is a whcrc
 * @cmd:      Buffer containing the RCCB and payload to execute
 * @cmd_size: Size of the command buffer.
 *
 * We copy the command into whcrc->cmd_buf (as it is pretty and
 * aligned and physically contiguous) and then press the right keys in
 * the controller's URCCMD register to get it to read it. We might
 * have to wait for the cmd_sem to be open to us.
 *
 * NOTE: rc's mutex has to be locked
 */
static int whcrc_cmd(struct uwb_rc *uwb_rc,
	     const struct uwb_rccb *cmd, size_t cmd_size)
{
	int result = 0;
	struct whcrc *whcrc = uwb_rc->priv;
	struct device *dev = &whcrc->umc_dev->dev;
	u32 urccmd;

	d_fnstart(3, dev, "(%p, %p, %zu)\n", uwb_rc, cmd, cmd_size);
	might_sleep();

	/* 4096-byte limit presumably matches URCCMD_SIZE_MASK's field
	 * width -- TODO confirm against the WHCI spec. */
	if (cmd_size >= 4096) {
		result = -E2BIG;
		goto error;
	}

	/*
	 * If the URC is halted, then the hardware has reset itself.
	 * Attempt to recover by restarting the device and then return
	 * an error as it's likely that the current command isn't
	 * valid for a newly started RC.
	 */
	if (le_readl(whcrc->rc_base + URCSTS) & URCSTS_HALTED) {
		dev_err(dev, "requesting reset of halted radio controller\n");
		uwb_rc_reset_all(uwb_rc);
		result = -EIO;
		goto error;
	}

	/* Wait (up to HZ/2) for the previous command to retire; the IRQ
	 * handler wakes cmd_wq on URCSTS_RCI. */
	result = wait_event_timeout(whcrc->cmd_wq,
		!(le_readl(whcrc->rc_base + URCCMD) & URCCMD_ACTIVE), HZ/2);
	if (result == 0) {
		dev_err(dev, "device is not ready to execute commands\n");
		result = -ETIMEDOUT;
		goto error;
	}

	memmove(whcrc->cmd_buf, cmd, cmd_size);
	le_writeq(whcrc->cmd_dma_buf, whcrc->rc_base + URCCMDADDR);

	/* Read/modify/write of URCCMD is shared with the event path:
	 * serialize with irq_lock. */
	spin_lock(&whcrc->irq_lock);
	urccmd = le_readl(whcrc->rc_base + URCCMD);
	urccmd &= ~(URCCMD_EARV | URCCMD_SIZE_MASK);
	le_writel(urccmd | URCCMD_ACTIVE | URCCMD_IWR | cmd_size,
		  whcrc->rc_base + URCCMD);
	spin_unlock(&whcrc->irq_lock);

error:
	d_fnend(3, dev, "(%p, %p, %zu) = %d\n",
		uwb_rc, cmd, cmd_size, result);
	return result;
}
145
146static int whcrc_reset(struct uwb_rc *rc)
147{
148 struct whcrc *whcrc = rc->priv;
149
150 return umc_controller_reset(whcrc->umc_dev);
151}
152
/**
 * Reset event reception mechanism and tell hw we are ready to get more
 *
 * We have read all the events in the event buffer, so we are ready to
 * reset it to the beginning.
 *
 * This is only called during initialization or after an event buffer
 * has been retired. This means we can be sure that event processing
 * is disabled and it's safe to update the URCEVTADDR register.
 *
 * There's no need to wait for the event processing to start as the
 * URC will not clear URCCMD_ACTIVE until (internal) event buffer
 * space is available.
 */
static
void whcrc_enable_events(struct whcrc *whcrc)
{
	struct device *dev = &whcrc->umc_dev->dev;
	u32 urccmd;

	d_fnstart(4, dev, "(whcrc %p)\n", whcrc);

	/* Point the hardware at the (re-usable) DMA event buffer. */
	le_writeq(whcrc->evt_dma_buf, whcrc->rc_base + URCEVTADDR);

	/* Set EARV (event address register valid) without disturbing a
	 * possibly in-flight command's ACTIVE bit. */
	spin_lock(&whcrc->irq_lock);
	urccmd = le_readl(whcrc->rc_base + URCCMD) & ~URCCMD_ACTIVE;
	le_writel(urccmd | URCCMD_EARV, whcrc->rc_base + URCCMD);
	spin_unlock(&whcrc->irq_lock);

	d_fnend(4, dev, "(whcrc %p) = void\n", whcrc);
}
184
/*
 * Workqueue bottom half for "event ready" interrupts: read the event
 * size out of URCEVTADDR, hand the DMA event buffer to the UWB stack
 * (uwb_rc_neh_grok) and re-arm event reception.
 */
static void whcrc_event_work(struct work_struct *work)
{
	struct whcrc *whcrc = container_of(work, struct whcrc, event_work);
	struct device *dev = &whcrc->umc_dev->dev;
	size_t size;
	u64 urcevtaddr;

	urcevtaddr = le_readq(whcrc->rc_base + URCEVTADDR);
	size = urcevtaddr & URCEVTADDR_OFFSET_MASK;

	d_printf(3, dev, "received %zu octet event\n", size);
	d_dump(4, dev, whcrc->evt_buf, size > 32 ? 32 : size);

	uwb_rc_neh_grok(whcrc->uwb_rc, whcrc->evt_buf, size);
	whcrc_enable_events(whcrc);
}
201
/**
 * Catch interrupts?
 *
 * We ack immediately (and expect the hw to do the right thing and
 * raise another IRQ if things have changed :)
 */
static
irqreturn_t whcrc_irq_cb(int irq, void *_whcrc)
{
	struct whcrc *whcrc = _whcrc;
	struct device *dev = &whcrc->umc_dev->dev;
	u32 urcsts;

	urcsts = le_readl(whcrc->rc_base + URCSTS);
	if (!(urcsts & URCSTS_INT_MASK))
		return IRQ_NONE;	/* not ours: the IRQ line is shared */
	/* Ack: writing the status bits back presumably clears them
	 * (write-one-to-clear) -- TODO confirm against the WHCI spec. */
	le_writel(urcsts & URCSTS_INT_MASK, whcrc->rc_base + URCSTS);

	d_printf(4, dev, "acked 0x%08x, urcsts 0x%08x\n",
		 le_readl(whcrc->rc_base + URCSTS), urcsts);

	if (urcsts & URCSTS_HSE) {
		dev_err(dev, "host system error -- hardware halted\n");
		/* FIXME: do something sensible here */
		goto out;
	}
	if (urcsts & URCSTS_ER) {
		/* Event ready: defer the buffer walk to process context. */
		d_printf(3, dev, "ER: event ready\n");
		schedule_work(&whcrc->event_work);
	}
	if (urcsts & URCSTS_RCI) {
		/* Command retired: release a waiter in whcrc_cmd(). */
		d_printf(3, dev, "RCI: ready to execute another command\n");
		wake_up_all(&whcrc->cmd_wq);
	}
out:
	return IRQ_HANDLED;
}
239
240
241/**
242 * Initialize a UMC RC interface: map regions, get (shared) IRQ
243 */
244static
245int whcrc_setup_rc_umc(struct whcrc *whcrc)
246{
247 int result = 0;
248 struct device *dev = &whcrc->umc_dev->dev;
249 struct umc_dev *umc_dev = whcrc->umc_dev;
250
251 whcrc->area = umc_dev->resource.start;
252 whcrc->rc_len = umc_dev->resource.end - umc_dev->resource.start + 1;
253 result = -EBUSY;
254 if (request_mem_region(whcrc->area, whcrc->rc_len, KBUILD_MODNAME)
255 == NULL) {
256 dev_err(dev, "can't request URC region (%zu bytes @ 0x%lx): %d\n",
257 whcrc->rc_len, whcrc->area, result);
258 goto error_request_region;
259 }
260
261 whcrc->rc_base = ioremap_nocache(whcrc->area, whcrc->rc_len);
262 if (whcrc->rc_base == NULL) {
263 dev_err(dev, "can't ioremap registers (%zu bytes @ 0x%lx): %d\n",
264 whcrc->rc_len, whcrc->area, result);
265 goto error_ioremap_nocache;
266 }
267
268 result = request_irq(umc_dev->irq, whcrc_irq_cb, IRQF_SHARED,
269 KBUILD_MODNAME, whcrc);
270 if (result < 0) {
271 dev_err(dev, "can't allocate IRQ %d: %d\n",
272 umc_dev->irq, result);
273 goto error_request_irq;
274 }
275
276 result = -ENOMEM;
277 whcrc->cmd_buf = dma_alloc_coherent(&umc_dev->dev, PAGE_SIZE,
278 &whcrc->cmd_dma_buf, GFP_KERNEL);
279 if (whcrc->cmd_buf == NULL) {
280 dev_err(dev, "Can't allocate cmd transfer buffer\n");
281 goto error_cmd_buffer;
282 }
283
284 whcrc->evt_buf = dma_alloc_coherent(&umc_dev->dev, PAGE_SIZE,
285 &whcrc->evt_dma_buf, GFP_KERNEL);
286 if (whcrc->evt_buf == NULL) {
287 dev_err(dev, "Can't allocate evt transfer buffer\n");
288 goto error_evt_buffer;
289 }
290 d_printf(3, dev, "UWB RC Interface: %zu bytes at 0x%p, irq %u\n",
291 whcrc->rc_len, whcrc->rc_base, umc_dev->irq);
292 return 0;
293
294error_evt_buffer:
295 dma_free_coherent(&umc_dev->dev, PAGE_SIZE, whcrc->cmd_buf,
296 whcrc->cmd_dma_buf);
297error_cmd_buffer:
298 free_irq(umc_dev->irq, whcrc);
299error_request_irq:
300 iounmap(whcrc->rc_base);
301error_ioremap_nocache:
302 release_mem_region(whcrc->area, whcrc->rc_len);
303error_request_region:
304 return result;
305}
306
307
308/**
309 * Release RC's UMC resources
310 */
311static
312void whcrc_release_rc_umc(struct whcrc *whcrc)
313{
314 struct umc_dev *umc_dev = whcrc->umc_dev;
315
316 dma_free_coherent(&umc_dev->dev, PAGE_SIZE, whcrc->evt_buf,
317 whcrc->evt_dma_buf);
318 dma_free_coherent(&umc_dev->dev, PAGE_SIZE, whcrc->cmd_buf,
319 whcrc->cmd_dma_buf);
320 free_irq(umc_dev->irq, whcrc);
321 iounmap(whcrc->rc_base);
322 release_mem_region(whcrc->area, whcrc->rc_len);
323}
324
325
/**
 * whcrc_start_rc - start a WHCI radio controller
 * @whcrc: the radio controller to start
 *
 * Reset the UMC device, start the radio controller, enable events and
 * finally enable interrupts.
 */
static int whcrc_start_rc(struct uwb_rc *rc)
{
	struct whcrc *whcrc = rc->priv;
	int result = 0;
	struct device *dev = &whcrc->umc_dev->dev;
	unsigned long start, duration;	/* only used when d_test(3) is on */

	/* Reset the thing */
	le_writel(URCCMD_RESET, whcrc->rc_base + URCCMD);
	if (d_test(3))
		start = jiffies;
	/* Hardware clears URCCMD_RESET when the reset completes. */
	if (whci_wait_for(dev, whcrc->rc_base + URCCMD, URCCMD_RESET, 0,
			  5000, "device to reset at init") < 0) {
		result = -EBUSY;
		goto error;
	} else if (d_test(3)) {
		duration = jiffies - start;
		if (duration > msecs_to_jiffies(40))
			dev_err(dev, "Device took %ums to "
				"reset. MAX expected: 40ms\n",
				jiffies_to_msecs(duration));
	}

	/* Set the event buffer, start the controller (enable IRQs later) */
	le_writel(0, whcrc->rc_base + URCINTR);
	le_writel(URCCMD_RS, whcrc->rc_base + URCCMD);
	result = -ETIMEDOUT;	/* returned if the wait below fails */
	if (d_test(3))
		start = jiffies;
	/* Running means URCSTS_HALTED is clear. */
	if (whci_wait_for(dev, whcrc->rc_base + URCSTS, URCSTS_HALTED, 0,
			  5000, "device to start") < 0)
		goto error;
	if (d_test(3)) {
		duration = jiffies - start;
		if (duration > msecs_to_jiffies(40))
			dev_err(dev, "Device took %ums to start. "
				"MAX expected: 40ms\n",
				jiffies_to_msecs(duration));
	}
	whcrc_enable_events(whcrc);
	result = 0;
	le_writel(URCINTR_EN_ALL, whcrc->rc_base + URCINTR);
error:
	return result;
}
378
379
380/**
381 * whcrc_stop_rc - stop a WHCI radio controller
382 * @whcrc: the radio controller to stop
383 *
384 * Disable interrupts and cancel any pending event processing work
385 * before clearing the Run/Stop bit.
386 */
387static
388void whcrc_stop_rc(struct uwb_rc *rc)
389{
390 struct whcrc *whcrc = rc->priv;
391 struct umc_dev *umc_dev = whcrc->umc_dev;
392
393 le_writel(0, whcrc->rc_base + URCINTR);
394 cancel_work_sync(&whcrc->event_work);
395
396 le_writel(0, whcrc->rc_base + URCCMD);
397 whci_wait_for(&umc_dev->dev, whcrc->rc_base + URCSTS,
398 URCSTS_HALTED, 0, 40, "URCSTS.HALTED");
399}
400
401static void whcrc_init(struct whcrc *whcrc)
402{
403 spin_lock_init(&whcrc->irq_lock);
404 init_waitqueue_head(&whcrc->cmd_wq);
405 INIT_WORK(&whcrc->event_work, whcrc_event_work);
406}
407
/**
 * Initialize the radio controller.
 *
 * Allocates the uwb_rc and whcrc instances, maps the hardware
 * resources, wires up the transport ops and registers with the UWB
 * stack.  Errors unwind in reverse order through the goto chain.
 *
 * NOTE: we setup whcrc->uwb_rc before calling uwb_rc_add(); in the
 *       IRQ handler we use that to determine if the hw is ready to
 *       handle events. Looks like a race condition, but it really is
 *       not.
 */
static
int whcrc_probe(struct umc_dev *umc_dev)
{
	int result;
	struct uwb_rc *uwb_rc;
	struct whcrc *whcrc;
	struct device *dev = &umc_dev->dev;

	d_fnstart(3, dev, "(umc_dev %p)\n", umc_dev);
	result = -ENOMEM;	/* covers both allocation failures below */
	uwb_rc = uwb_rc_alloc();
	if (uwb_rc == NULL) {
		dev_err(dev, "unable to allocate RC instance\n");
		goto error_rc_alloc;
	}
	whcrc = kzalloc(sizeof(*whcrc), GFP_KERNEL);
	if (whcrc == NULL) {
		dev_err(dev, "unable to allocate WHC-RC instance\n");
		goto error_alloc;
	}
	whcrc_init(whcrc);
	whcrc->umc_dev = umc_dev;

	result = whcrc_setup_rc_umc(whcrc);
	if (result < 0) {
		dev_err(dev, "Can't setup RC UMC interface: %d\n", result);
		goto error_setup_rc_umc;
	}
	whcrc->uwb_rc = uwb_rc;

	/* Hook up the transport ops the UWB stack will call. */
	uwb_rc->owner = THIS_MODULE;
	uwb_rc->cmd = whcrc_cmd;
	uwb_rc->reset = whcrc_reset;
	uwb_rc->start = whcrc_start_rc;
	uwb_rc->stop = whcrc_stop_rc;

	result = uwb_rc_add(uwb_rc, dev, whcrc);
	if (result < 0)
		goto error_rc_add;
	umc_set_drvdata(umc_dev, whcrc);
	d_fnend(3, dev, "(umc_dev %p) = 0\n", umc_dev);
	return 0;

error_rc_add:
	whcrc_release_rc_umc(whcrc);
error_setup_rc_umc:
	kfree(whcrc);
error_alloc:
	uwb_rc_put(uwb_rc);
error_rc_alloc:
	d_fnend(3, dev, "(umc_dev %p) = %d\n", umc_dev, result);
	return result;
}
469
470/**
471 * Clean up the radio control resources
472 *
473 * When we up the command semaphore, everybody possibly held trying to
474 * execute a command should be granted entry and then they'll see the
475 * host is quiescing and up it (so it will chain to the next waiter).
476 * This should not happen (in any case), as we can only remove when
477 * there are no handles open...
478 */
479static void whcrc_remove(struct umc_dev *umc_dev)
480{
481 struct whcrc *whcrc = umc_get_drvdata(umc_dev);
482 struct uwb_rc *uwb_rc = whcrc->uwb_rc;
483
484 umc_set_drvdata(umc_dev, NULL);
485 uwb_rc_rm(uwb_rc);
486 whcrc_release_rc_umc(whcrc);
487 kfree(whcrc);
488 uwb_rc_put(uwb_rc);
489 d_printf(1, &umc_dev->dev, "freed whcrc %p\n", whcrc);
490}
491
/* PCI device ID's that we handle [so it gets loaded]: match any device
 * in the WHCI wireless class. */
static struct pci_device_id whcrc_id_table[] = {
	{ PCI_DEVICE_CLASS(PCI_CLASS_WIRELESS_WHCI, ~0) },
	{ /* empty last entry */ }
};
MODULE_DEVICE_TABLE(pci, whcrc_id_table);
498
/* UMC capability driver glue: binds to the WHCI Radio Control
 * capability of enumerated UMC devices. */
static struct umc_driver whcrc_driver = {
	.name = "whc-rc",
	.cap_id = UMC_CAP_ID_WHCI_RC,
	.probe = whcrc_probe,
	.remove = whcrc_remove,
};
505
506static int __init whcrc_driver_init(void)
507{
508 return umc_driver_register(&whcrc_driver);
509}
510module_init(whcrc_driver_init);
511
512static void __exit whcrc_driver_exit(void)
513{
514 umc_driver_unregister(&whcrc_driver);
515}
516module_exit(whcrc_driver_exit);
517
518MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
519MODULE_DESCRIPTION("Wireless Host Controller Radio Control Driver");
520MODULE_LICENSE("GPL");
diff --git a/drivers/uwb/whci.c b/drivers/uwb/whci.c
new file mode 100644
index 000000000000..3df2388f908f
--- /dev/null
+++ b/drivers/uwb/whci.c
@@ -0,0 +1,269 @@
1/*
2 * WHCI UWB Multi-interface Controller enumerator.
3 *
4 * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
5 *
6 * This file is released under the GNU GPL v2.
7 */
8#include <linux/delay.h>
9#include <linux/kernel.h>
10#include <linux/pci.h>
11#include <linux/dma-mapping.h>
12#include <linux/uwb/whci.h>
13#include <linux/uwb/umc.h>
14
15struct whci_card {
16 struct pci_dev *pci;
17 void __iomem *uwbbase;
18 u8 n_caps;
19 struct umc_dev *devs[0];
20};
21
22
23/* Fix faulty HW :( */
24static
25u64 whci_capdata_quirks(struct whci_card *card, u64 capdata)
26{
27 u64 capdata_orig = capdata;
28 struct pci_dev *pci_dev = card->pci;
29 if (pci_dev->vendor == PCI_VENDOR_ID_INTEL
30 && (pci_dev->device == 0x0c3b || pci_dev->device == 0004)
31 && pci_dev->class == 0x0d1010) {
32 switch (UWBCAPDATA_TO_CAP_ID(capdata)) {
33 /* WLP capability has 0x100 bytes of aperture */
34 case 0x80:
35 capdata |= 0x40 << 8; break;
36 /* WUSB capability has 0x80 bytes of aperture
37 * and ID is 1 */
38 case 0x02:
39 capdata &= ~0xffff;
40 capdata |= 0x2001;
41 break;
42 }
43 }
44 if (capdata_orig != capdata)
45 dev_warn(&pci_dev->dev,
46 "PCI v%04x d%04x c%06x#%02x: "
47 "corrected capdata from %016Lx to %016Lx\n",
48 pci_dev->vendor, pci_dev->device, pci_dev->class,
49 (unsigned)UWBCAPDATA_TO_CAP_ID(capdata),
50 (unsigned long long)capdata_orig,
51 (unsigned long long)capdata);
52 return capdata;
53}
54
55
56/**
57 * whci_wait_for - wait for a WHCI register to be set
58 *
59 * Polls (for at most @max_ms ms) until '*@reg & @mask == @result'.
60 */
61int whci_wait_for(struct device *dev, u32 __iomem *reg, u32 mask, u32 result,
62 unsigned long max_ms, const char *tag)
63{
64 unsigned t = 0;
65 u32 val;
66 for (;;) {
67 val = le_readl(reg);
68 if ((val & mask) == result)
69 break;
70 msleep(10);
71 if (t >= max_ms) {
72 dev_err(dev, "timed out waiting for %s ", tag);
73 return -ETIMEDOUT;
74 }
75 t += 10;
76 }
77 return 0;
78}
79EXPORT_SYMBOL_GPL(whci_wait_for);
80
81
82/*
83 * NOTE: the capinfo and capdata registers are slightly different
84 * (size and cap-id fields). So for cap #0, we need to fill
85 * in. Size comes from the size of the register block
86 * (statically calculated); cap_id comes from nowhere, we use
87 * zero, that is reserved, for the radio controller, because
88 * none was defined at the spec level.
89 */
90static int whci_add_cap(struct whci_card *card, int n)
91{
92 struct umc_dev *umc;
93 u64 capdata;
94 int bar, err;
95
96 umc = umc_device_create(&card->pci->dev, n);
97 if (umc == NULL)
98 return -ENOMEM;
99
100 capdata = le_readq(card->uwbbase + UWBCAPDATA(n));
101
102 bar = UWBCAPDATA_TO_BAR(capdata) << 1;
103
104 capdata = whci_capdata_quirks(card, capdata);
105 /* Capability 0 is the radio controller. It's size is 32
106 * bytes (WHCI0.95[2.3, T2-9]). */
107 umc->version = UWBCAPDATA_TO_VERSION(capdata);
108 umc->cap_id = n == 0 ? 0 : UWBCAPDATA_TO_CAP_ID(capdata);
109 umc->bar = bar;
110 umc->resource.start = pci_resource_start(card->pci, bar)
111 + UWBCAPDATA_TO_OFFSET(capdata);
112 umc->resource.end = umc->resource.start
113 + (n == 0 ? 0x20 : UWBCAPDATA_TO_SIZE(capdata)) - 1;
114 umc->resource.name = umc->dev.bus_id;
115 umc->resource.flags = card->pci->resource[bar].flags;
116 umc->resource.parent = &card->pci->resource[bar];
117 umc->irq = card->pci->irq;
118
119 err = umc_device_register(umc);
120 if (err < 0)
121 goto error;
122 card->devs[n] = umc;
123 return 0;
124
125error:
126 kfree(umc);
127 return err;
128}
129
130static void whci_del_cap(struct whci_card *card, int n)
131{
132 struct umc_dev *umc = card->devs[n];
133
134 if (umc != NULL)
135 umc_device_unregister(umc);
136}
137
/*
 * Read the number of capabilities this card advertises.
 *
 * Temporarily maps only the first 8 bytes of BAR 0 (enough for the
 * UWBCAPINFO register) and extracts the capability count.  Returns
 * the count, or -ENOMEM if the mapping failed.
 */
static int whci_n_caps(struct pci_dev *pci)
{
	void __iomem *uwbbase;
	u64 capinfo;

	uwbbase = pci_iomap(pci, 0, 8);
	if (!uwbbase)
		return -ENOMEM;
	capinfo = le_readq(uwbbase + UWBCAPINFO);
	pci_iounmap(pci, uwbbase);

	return UWBCAPINFO_TO_N_CAPS(capinfo);
}
151
152static int whci_probe(struct pci_dev *pci, const struct pci_device_id *id)
153{
154 struct whci_card *card;
155 int err, n_caps, n;
156
157 err = pci_enable_device(pci);
158 if (err < 0)
159 goto error;
160 pci_enable_msi(pci);
161 pci_set_master(pci);
162 err = -ENXIO;
163 if (!pci_set_dma_mask(pci, DMA_64BIT_MASK))
164 pci_set_consistent_dma_mask(pci, DMA_64BIT_MASK);
165 else if (!pci_set_dma_mask(pci, DMA_32BIT_MASK))
166 pci_set_consistent_dma_mask(pci, DMA_32BIT_MASK);
167 else
168 goto error_dma;
169
170 err = n_caps = whci_n_caps(pci);
171 if (n_caps < 0)
172 goto error_ncaps;
173
174 err = -ENOMEM;
175 card = kzalloc(sizeof(struct whci_card)
176 + sizeof(struct whci_dev *) * (n_caps + 1),
177 GFP_KERNEL);
178 if (card == NULL)
179 goto error_kzalloc;
180 card->pci = pci;
181 card->n_caps = n_caps;
182
183 err = -EBUSY;
184 if (!request_mem_region(pci_resource_start(pci, 0),
185 UWBCAPDATA_SIZE(card->n_caps),
186 "whci (capability data)"))
187 goto error_request_memregion;
188 err = -ENOMEM;
189 card->uwbbase = pci_iomap(pci, 0, UWBCAPDATA_SIZE(card->n_caps));
190 if (!card->uwbbase)
191 goto error_iomap;
192
193 /* Add each capability. */
194 for (n = 0; n <= card->n_caps; n++) {
195 err = whci_add_cap(card, n);
196 if (err < 0 && n == 0) {
197 dev_err(&pci->dev, "cannot bind UWB radio controller:"
198 " %d\n", err);
199 goto error_bind;
200 }
201 if (err < 0)
202 dev_warn(&pci->dev, "warning: cannot bind capability "
203 "#%u: %d\n", n, err);
204 }
205 pci_set_drvdata(pci, card);
206 return 0;
207
208error_bind:
209 pci_iounmap(pci, card->uwbbase);
210error_iomap:
211 release_mem_region(pci_resource_start(pci, 0), UWBCAPDATA_SIZE(card->n_caps));
212error_request_memregion:
213 kfree(card);
214error_kzalloc:
215error_ncaps:
216error_dma:
217 pci_disable_msi(pci);
218 pci_disable_device(pci);
219error:
220 return err;
221}
222
/*
 * Undo whci_probe(): unregister every UMC child (in reverse so the
 * radio controller in slot 0 goes last), unmap and release the
 * capability register region, free the card state and disable the
 * PCI device.
 */
static void whci_remove(struct pci_dev *pci)
{
	struct whci_card *card = pci_get_drvdata(pci);
	int n;

	pci_set_drvdata(pci, NULL);
	/* Unregister each capability in reverse (so the master device
	 * is unregistered last). */
	for (n = card->n_caps; n >= 0 ; n--)
		whci_del_cap(card, n);
	pci_iounmap(pci, card->uwbbase);
	release_mem_region(pci_resource_start(pci, 0), UWBCAPDATA_SIZE(card->n_caps));
	kfree(card);
	pci_disable_msi(pci);
	pci_disable_device(pci);
}
239
/* Bind to any PCI device with the WHCI wireless class code. */
static struct pci_device_id whci_id_table[] = {
	{ PCI_DEVICE_CLASS(PCI_CLASS_WIRELESS_WHCI, ~0) },
	{ 0 },
};
MODULE_DEVICE_TABLE(pci, whci_id_table);
245
246
247static struct pci_driver whci_driver = {
248 .name = "whci",
249 .id_table = whci_id_table,
250 .probe = whci_probe,
251 .remove = whci_remove,
252};
253
/* Module load hook: register the PCI driver. */
static int __init whci_init(void)
{
	return pci_register_driver(&whci_driver);
}
258
/* Module unload hook: unregister the PCI driver. */
static void __exit whci_exit(void)
{
	pci_unregister_driver(&whci_driver);
}

module_init(whci_init);
module_exit(whci_exit);
266
267MODULE_DESCRIPTION("WHCI UWB Multi-interface Controller enumerator");
268MODULE_AUTHOR("Cambridge Silicon Radio Ltd.");
269MODULE_LICENSE("GPL");
diff --git a/drivers/uwb/wlp/Makefile b/drivers/uwb/wlp/Makefile
new file mode 100644
index 000000000000..c72c11db5b1b
--- /dev/null
+++ b/drivers/uwb/wlp/Makefile
@@ -0,0 +1,10 @@
# Build the WLP (WiMedia Logical Link Control Protocol) substack as wlp.ko.
obj-$(CONFIG_UWB_WLP) := wlp.o

wlp-objs :=	\
	driver.o \
	eda.o \
	messages.o \
	sysfs.o \
	txrx.o \
	wlp-lc.o \
	wss-lc.o
diff --git a/drivers/uwb/wlp/driver.c b/drivers/uwb/wlp/driver.c
new file mode 100644
index 000000000000..cb8d699b6a67
--- /dev/null
+++ b/drivers/uwb/wlp/driver.c
@@ -0,0 +1,43 @@
1/*
2 * WiMedia Logical Link Control Protocol (WLP)
3 *
4 * Copyright (C) 2007 Intel Corporation
5 * Reinette Chatre <reinette.chatre@intel.com>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 * 02110-1301, USA.
20 *
21 *
22 * Life cycle of WLP substack
23 *
24 * FIXME: Docs
25 */
26
27#include <linux/module.h>
28
/* Module load hook: the WLP substack has no global setup yet. */
static int __init wlp_subsys_init(void)
{
	return 0;
}
module_init(wlp_subsys_init);
34
35static void __exit wlp_subsys_exit(void)
36{
37 return;
38}
39module_exit(wlp_subsys_exit);
40
41MODULE_AUTHOR("Reinette Chatre <reinette.chatre@intel.com>");
42MODULE_DESCRIPTION("WiMedia Logical Link Control Protocol (WLP)");
43MODULE_LICENSE("GPL");
diff --git a/drivers/uwb/wlp/eda.c b/drivers/uwb/wlp/eda.c
new file mode 100644
index 000000000000..cdfe8dfc4340
--- /dev/null
+++ b/drivers/uwb/wlp/eda.c
@@ -0,0 +1,449 @@
1/*
2 * WUSB Wire Adapter: WLP interface
3 * Ethernet to device address cache
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * We need to be able to map ethernet addresses to device addresses
24 * and back because there is not explicit relationship between the eth
25 * addresses used in the ETH frames and the device addresses (no, it
26 * would not have been simpler to force as ETH address the MBOA MAC
27 * address...no, not at all :).
28 *
29 * A device has one MBOA MAC address and one device address. It is possible
30 * for a device to have more than one virtual MAC address (although a
31 * virtual address can be the same as the MBOA MAC address). The device
32 * address is guaranteed to be unique among the devices in the extended
33 * beacon group (see ECMA 17.1.1). We thus use the device address as index
34 * to this cache. We do allow searching based on virtual address as this
35 * is how Ethernet frames will be addressed.
36 *
37 * We need to support virtual EUI-48. Although, right now the virtual
38 * EUI-48 will always be the same as the MAC SAP address. The EDA cache
39 * entry thus contains a MAC SAP address as well as the virtual address
40 * (used to map the network stack address to a neighbor). When we move
41 * to support more than one virtual MAC on a host then this organization
42 * will have to change. Perhaps a neighbor has a list of WSSs, each with a
43 * tag and virtual EUI-48.
44 *
45 * On data transmission
46 * it is used to determine if the neighbor is connected and what WSS it
47 * belongs to. With this we know what tag to add to the WLP frame. Storing
48 * the WSS in the EDA cache may be overkill because we only support one
49 * WSS. Hopefully we will support more than one WSS at some point.
50 * On data reception it is used to determine the WSS based on
51 * the tag and address of the transmitting neighbor.
52 */
53
54#define D_LOCAL 5
55#include <linux/netdevice.h>
56#include <linux/uwb/debug.h>
57#include <linux/etherdevice.h>
58#include <linux/wlp.h>
59#include "wlp-internal.h"
60
61
62/* FIXME: cache is not purged, only on device close */
63
64/* FIXME: does not scale, change to dynamic array */
65
66/*
67 * Initialize the EDA cache
68 *
69 * @returns 0 if ok, < 0 errno code on error
70 *
71 * Call when the interface is being brought up
72 *
73 * NOTE: Keep it as a separate function as the implementation will
74 * change and be more complex.
75 */
76void wlp_eda_init(struct wlp_eda *eda)
77{
78 INIT_LIST_HEAD(&eda->cache);
79 spin_lock_init(&eda->lock);
80}
81
82/*
83 * Release the EDA cache
84 *
85 * @returns 0 if ok, < 0 errno code on error
86 *
87 * Called when the interface is brought down
88 */
89void wlp_eda_release(struct wlp_eda *eda)
90{
91 unsigned long flags;
92 struct wlp_eda_node *itr, *next;
93
94 spin_lock_irqsave(&eda->lock, flags);
95 list_for_each_entry_safe(itr, next, &eda->cache, list_node) {
96 list_del(&itr->list_node);
97 kfree(itr);
98 }
99 spin_unlock_irqrestore(&eda->lock, flags);
100}
101
102/*
103 * Add an address mapping
104 *
105 * @returns 0 if ok, < 0 errno code on error
106 *
107 * An address mapping is initially created when the neighbor device is seen
108 * for the first time (it is "onair"). At this time the neighbor is not
109 * connected or associated with a WSS so we only populate the Ethernet and
110 * Device address fields.
111 *
112 */
113int wlp_eda_create_node(struct wlp_eda *eda,
114 const unsigned char eth_addr[ETH_ALEN],
115 const struct uwb_dev_addr *dev_addr)
116{
117 int result = 0;
118 struct wlp_eda_node *itr;
119 unsigned long flags;
120
121 BUG_ON(dev_addr == NULL || eth_addr == NULL);
122 spin_lock_irqsave(&eda->lock, flags);
123 list_for_each_entry(itr, &eda->cache, list_node) {
124 if (!memcmp(&itr->dev_addr, dev_addr, sizeof(itr->dev_addr))) {
125 printk(KERN_ERR "EDA cache already contains entry "
126 "for neighbor %02x:%02x\n",
127 dev_addr->data[1], dev_addr->data[0]);
128 result = -EEXIST;
129 goto out_unlock;
130 }
131 }
132 itr = kzalloc(sizeof(*itr), GFP_ATOMIC);
133 if (itr != NULL) {
134 memcpy(itr->eth_addr, eth_addr, sizeof(itr->eth_addr));
135 itr->dev_addr = *dev_addr;
136 list_add(&itr->list_node, &eda->cache);
137 } else
138 result = -ENOMEM;
139out_unlock:
140 spin_unlock_irqrestore(&eda->lock, flags);
141 return result;
142}
143
144/*
145 * Remove entry from EDA cache
146 *
147 * This is done when the device goes off air.
148 */
149void wlp_eda_rm_node(struct wlp_eda *eda, const struct uwb_dev_addr *dev_addr)
150{
151 struct wlp_eda_node *itr, *next;
152 unsigned long flags;
153
154 spin_lock_irqsave(&eda->lock, flags);
155 list_for_each_entry_safe(itr, next, &eda->cache, list_node) {
156 if (!memcmp(&itr->dev_addr, dev_addr, sizeof(itr->dev_addr))) {
157 list_del(&itr->list_node);
158 kfree(itr);
159 break;
160 }
161 }
162 spin_unlock_irqrestore(&eda->lock, flags);
163}
164
165/*
166 * Update an address mapping
167 *
168 * @returns 0 if ok, < 0 errno code on error
169 */
170int wlp_eda_update_node(struct wlp_eda *eda,
171 const struct uwb_dev_addr *dev_addr,
172 struct wlp_wss *wss,
173 const unsigned char virt_addr[ETH_ALEN],
174 const u8 tag, const enum wlp_wss_connect state)
175{
176 int result = -ENOENT;
177 struct wlp_eda_node *itr;
178 unsigned long flags;
179
180 spin_lock_irqsave(&eda->lock, flags);
181 list_for_each_entry(itr, &eda->cache, list_node) {
182 if (!memcmp(&itr->dev_addr, dev_addr, sizeof(itr->dev_addr))) {
183 /* Found it, update it */
184 itr->wss = wss;
185 memcpy(itr->virt_addr, virt_addr,
186 sizeof(itr->virt_addr));
187 itr->tag = tag;
188 itr->state = state;
189 result = 0;
190 goto out_unlock;
191 }
192 }
193 /* Not found */
194out_unlock:
195 spin_unlock_irqrestore(&eda->lock, flags);
196 return result;
197}
198
199/*
200 * Update only state field of an address mapping
201 *
202 * @returns 0 if ok, < 0 errno code on error
203 */
204int wlp_eda_update_node_state(struct wlp_eda *eda,
205 const struct uwb_dev_addr *dev_addr,
206 const enum wlp_wss_connect state)
207{
208 int result = -ENOENT;
209 struct wlp_eda_node *itr;
210 unsigned long flags;
211
212 spin_lock_irqsave(&eda->lock, flags);
213 list_for_each_entry(itr, &eda->cache, list_node) {
214 if (!memcmp(&itr->dev_addr, dev_addr, sizeof(itr->dev_addr))) {
215 /* Found it, update it */
216 itr->state = state;
217 result = 0;
218 goto out_unlock;
219 }
220 }
221 /* Not found */
222out_unlock:
223 spin_unlock_irqrestore(&eda->lock, flags);
224 return result;
225}
226
227/*
228 * Return contents of EDA cache entry
229 *
230 * @dev_addr: index to EDA cache
231 * @eda_entry: pointer to where contents of EDA cache will be copied
232 */
233int wlp_copy_eda_node(struct wlp_eda *eda, struct uwb_dev_addr *dev_addr,
234 struct wlp_eda_node *eda_entry)
235{
236 int result = -ENOENT;
237 struct wlp_eda_node *itr;
238 unsigned long flags;
239
240 spin_lock_irqsave(&eda->lock, flags);
241 list_for_each_entry(itr, &eda->cache, list_node) {
242 if (!memcmp(&itr->dev_addr, dev_addr, sizeof(itr->dev_addr))) {
243 *eda_entry = *itr;
244 result = 0;
245 goto out_unlock;
246 }
247 }
248 /* Not found */
249out_unlock:
250 spin_unlock_irqrestore(&eda->lock, flags);
251 return result;
252}
253
254/*
255 * Execute function for every element in the cache
256 *
257 * @function: function to execute on element of cache (must be atomic)
258 * @priv: private data of function
259 * @returns: result of first function that failed, or last function
260 * executed if no function failed.
261 *
262 * Stop executing when function returns error for any element in cache.
263 *
264 * IMPORTANT: We are using a spinlock here: the function executed on each
265 * element has to be atomic.
266 */
267int wlp_eda_for_each(struct wlp_eda *eda, wlp_eda_for_each_f function,
268 void *priv)
269{
270 int result = 0;
271 struct wlp *wlp = container_of(eda, struct wlp, eda);
272 struct wlp_eda_node *entry;
273 unsigned long flags;
274
275 spin_lock_irqsave(&eda->lock, flags);
276 list_for_each_entry(entry, &eda->cache, list_node) {
277 result = (*function)(wlp, entry, priv);
278 if (result < 0)
279 break;
280 }
281 spin_unlock_irqrestore(&eda->lock, flags);
282 return result;
283}
284
285/*
286 * Execute function for single element in the cache (return dev addr)
287 *
288 * @virt_addr: index into EDA cache used to determine which element to
289 * execute the function on
290 * @dev_addr: device address of element in cache will be returned using
291 * @dev_addr
292 * @function: function to execute on element of cache (must be atomic)
293 * @priv: private data of function
294 * @returns: result of function
295 *
296 * IMPORTANT: We are using a spinlock here: the function executed on the
297 * element has to be atomic.
298 */
299int wlp_eda_for_virtual(struct wlp_eda *eda,
300 const unsigned char virt_addr[ETH_ALEN],
301 struct uwb_dev_addr *dev_addr,
302 wlp_eda_for_each_f function,
303 void *priv)
304{
305 int result = 0;
306 struct wlp *wlp = container_of(eda, struct wlp, eda);
307 struct device *dev = &wlp->rc->uwb_dev.dev;
308 struct wlp_eda_node *itr;
309 unsigned long flags;
310 int found = 0;
311
312 spin_lock_irqsave(&eda->lock, flags);
313 list_for_each_entry(itr, &eda->cache, list_node) {
314 if (!memcmp(itr->virt_addr, virt_addr,
315 sizeof(itr->virt_addr))) {
316 d_printf(6, dev, "EDA: looking for "
317 "%02x:%02x:%02x:%02x:%02x:%02x hit %02x:%02x "
318 "wss %p tag 0x%02x state %u\n",
319 virt_addr[0], virt_addr[1],
320 virt_addr[2], virt_addr[3],
321 virt_addr[4], virt_addr[5],
322 itr->dev_addr.data[1],
323 itr->dev_addr.data[0], itr->wss,
324 itr->tag, itr->state);
325 result = (*function)(wlp, itr, priv);
326 *dev_addr = itr->dev_addr;
327 found = 1;
328 break;
329 } else
330 d_printf(6, dev, "EDA: looking for "
331 "%02x:%02x:%02x:%02x:%02x:%02x "
332 "against "
333 "%02x:%02x:%02x:%02x:%02x:%02x miss\n",
334 virt_addr[0], virt_addr[1],
335 virt_addr[2], virt_addr[3],
336 virt_addr[4], virt_addr[5],
337 itr->virt_addr[0], itr->virt_addr[1],
338 itr->virt_addr[2], itr->virt_addr[3],
339 itr->virt_addr[4], itr->virt_addr[5]);
340 }
341 if (!found) {
342 if (printk_ratelimit())
343 dev_err(dev, "EDA: Eth addr %02x:%02x:%02x"
344 ":%02x:%02x:%02x not found.\n",
345 virt_addr[0], virt_addr[1],
346 virt_addr[2], virt_addr[3],
347 virt_addr[4], virt_addr[5]);
348 result = -ENODEV;
349 }
350 spin_unlock_irqrestore(&eda->lock, flags);
351 return result;
352}
353
/* Human-readable names for enum wlp_wss_connect values, indexed by state. */
static const char *__wlp_wss_connect_state[] = {
	"WLP_WSS_UNCONNECTED",
	"WLP_WSS_CONNECTED",
	"WLP_WSS_CONNECT_FAILED",
};

/* Map a WSS connection state to its name, tolerating out-of-range ids. */
static const char *wlp_wss_connect_state_str(unsigned id)
{
	if (id < sizeof(__wlp_wss_connect_state)
		 / sizeof(__wlp_wss_connect_state[0]))
		return __wlp_wss_connect_state[id];
	return "unknown WSS connection state";
}
365
366/*
367 * View EDA cache from user space
368 *
369 * A debugging feature to give user visibility into the EDA cache. Also
370 * used to display members of WSS to user (called from wlp_wss_members_show())
371 */
372ssize_t wlp_eda_show(struct wlp *wlp, char *buf)
373{
374 ssize_t result = 0;
375 struct wlp_eda_node *entry;
376 unsigned long flags;
377 struct wlp_eda *eda = &wlp->eda;
378 spin_lock_irqsave(&eda->lock, flags);
379 result = scnprintf(buf, PAGE_SIZE, "#eth_addr dev_addr wss_ptr "
380 "tag state virt_addr\n");
381 list_for_each_entry(entry, &eda->cache, list_node) {
382 result += scnprintf(buf + result, PAGE_SIZE - result,
383 "%02x:%02x:%02x:%02x:%02x:%02x %02x:%02x "
384 "%p 0x%02x %s "
385 "%02x:%02x:%02x:%02x:%02x:%02x\n",
386 entry->eth_addr[0], entry->eth_addr[1],
387 entry->eth_addr[2], entry->eth_addr[3],
388 entry->eth_addr[4], entry->eth_addr[5],
389 entry->dev_addr.data[1],
390 entry->dev_addr.data[0], entry->wss,
391 entry->tag,
392 wlp_wss_connect_state_str(entry->state),
393 entry->virt_addr[0], entry->virt_addr[1],
394 entry->virt_addr[2], entry->virt_addr[3],
395 entry->virt_addr[4], entry->virt_addr[5]);
396 if (result >= PAGE_SIZE)
397 break;
398 }
399 spin_unlock_irqrestore(&eda->lock, flags);
400 return result;
401}
402EXPORT_SYMBOL_GPL(wlp_eda_show);
403
404/*
405 * Add new EDA cache entry based on user input in sysfs
406 *
407 * Should only be used for debugging.
408 *
409 * The WSS is assumed to be the only WSS supported. This needs to be
410 * redesigned when we support more than one WSS.
411 */
412ssize_t wlp_eda_store(struct wlp *wlp, const char *buf, size_t size)
413{
414 ssize_t result;
415 struct wlp_eda *eda = &wlp->eda;
416 u8 eth_addr[6];
417 struct uwb_dev_addr dev_addr;
418 u8 tag;
419 unsigned state;
420
421 result = sscanf(buf, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx "
422 "%02hhx:%02hhx %02hhx %u\n",
423 &eth_addr[0], &eth_addr[1],
424 &eth_addr[2], &eth_addr[3],
425 &eth_addr[4], &eth_addr[5],
426 &dev_addr.data[1], &dev_addr.data[0], &tag, &state);
427 switch (result) {
428 case 6: /* no dev addr specified -- remove entry NOT IMPLEMENTED */
429 /*result = wlp_eda_rm(eda, eth_addr, &dev_addr);*/
430 result = -ENOSYS;
431 break;
432 case 10:
433 state = state >= 1 ? 1 : 0;
434 result = wlp_eda_create_node(eda, eth_addr, &dev_addr);
435 if (result < 0 && result != -EEXIST)
436 goto error;
437 /* Set virtual addr to be same as MAC */
438 result = wlp_eda_update_node(eda, &dev_addr, &wlp->wss,
439 eth_addr, tag, state);
440 if (result < 0)
441 goto error;
442 break;
443 default: /* bad format */
444 result = -EINVAL;
445 }
446error:
447 return result < 0 ? result : size;
448}
449EXPORT_SYMBOL_GPL(wlp_eda_store);
diff --git a/drivers/uwb/wlp/messages.c b/drivers/uwb/wlp/messages.c
new file mode 100644
index 000000000000..a64cb8241713
--- /dev/null
+++ b/drivers/uwb/wlp/messages.c
@@ -0,0 +1,1946 @@
1/*
2 * WiMedia Logical Link Control Protocol (WLP)
3 * Message construction and parsing
4 *
5 * Copyright (C) 2007 Intel Corporation
6 * Reinette Chatre <reinette.chatre@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * FIXME: docs
24 */
25
26#include <linux/wlp.h>
27#define D_LOCAL 6
28#include <linux/uwb/debug.h>
29#include "wlp-internal.h"
30
/* Names for the WLP association frame types, indexed by frame id. */
static
const char *__wlp_assoc_frame[] = {
	[WLP_ASSOC_D1] = "WLP_ASSOC_D1",
	[WLP_ASSOC_D2] = "WLP_ASSOC_D2",
	[WLP_ASSOC_M1] = "WLP_ASSOC_M1",
	[WLP_ASSOC_M2] = "WLP_ASSOC_M2",
	[WLP_ASSOC_M3] = "WLP_ASSOC_M3",
	[WLP_ASSOC_M4] = "WLP_ASSOC_M4",
	[WLP_ASSOC_M5] = "WLP_ASSOC_M5",
	[WLP_ASSOC_M6] = "WLP_ASSOC_M6",
	[WLP_ASSOC_M7] = "WLP_ASSOC_M7",
	[WLP_ASSOC_M8] = "WLP_ASSOC_M8",
	[WLP_ASSOC_F0] = "WLP_ASSOC_F0",
	[WLP_ASSOC_E1] = "WLP_ASSOC_E1",
	[WLP_ASSOC_E2] = "WLP_ASSOC_E2",
	[WLP_ASSOC_C1] = "WLP_ASSOC_C1",
	[WLP_ASSOC_C2] = "WLP_ASSOC_C2",
	[WLP_ASSOC_C3] = "WLP_ASSOC_C3",
	[WLP_ASSOC_C4] = "WLP_ASSOC_C4",
};

/* Map an association frame id to its name, tolerating out-of-range ids. */
static const char *wlp_assoc_frame_str(unsigned id)
{
	if (id >= ARRAY_SIZE(__wlp_assoc_frame))
		return "unknown association frame";
	return __wlp_assoc_frame[id];
}
58
/* Names for the WLP association error codes, indexed by error number. */
static const char *__wlp_assc_error[] = {
	"none",
	"Authenticator Failure",
	"Rogue activity suspected",
	"Device busy",
	"Setup Locked",
	"Registrar not ready",
	"Invalid WSS selection",
	"Message timeout",
	"Enrollment session timeout",
	"Device password invalid",
	"Unsupported version",
	"Internal error",
	"Undefined error",
	"Numeric comparison failure",
	"Waiting for user input",
};

/* Map an association error code to its description, tolerating bad ids. */
static const char *wlp_assc_error_str(unsigned id)
{
	if (id < sizeof(__wlp_assc_error) / sizeof(__wlp_assc_error[0]))
		return __wlp_assc_error[id];
	return "unknown WLP association error";
}
83
/* Fill an attribute header: little-endian type and value length (bytes,
 * excluding the header itself). */
static inline void wlp_set_attr_hdr(struct wlp_attr_hdr *hdr, unsigned type,
				    size_t len)
{
	hdr->type = cpu_to_le16(type);
	hdr->length = cpu_to_le16(len);
}
90
91/*
92 * Populate fields of a constant sized attribute
93 *
94 * @returns: total size of attribute including size of new value
95 *
96 * We have two instances of this function (wlp_pset and wlp_set): one takes
97 * the value as a parameter, the other takes a pointer to the value as
98 * parameter. They thus only differ in how the value is assigned to the
99 * attribute.
100 *
101 * We use sizeof(*attr) - sizeof(struct wlp_attr_hdr) instead of
102 * sizeof(type) to be able to use this same code for the structures that
103 * contain 8bit enum values and be able to deal with pointer types.
104 */
105#define wlp_set(type, type_code, name) \
106static size_t wlp_set_##name(struct wlp_attr_##name *attr, type value) \
107{ \
108 d_fnstart(6, NULL, "(attribute %p)\n", attr); \
109 wlp_set_attr_hdr(&attr->hdr, type_code, \
110 sizeof(*attr) - sizeof(struct wlp_attr_hdr)); \
111 attr->name = value; \
112 d_dump(6, NULL, attr, sizeof(*attr)); \
113 d_fnend(6, NULL, "(attribute %p)\n", attr); \
114 return sizeof(*attr); \
115}
116
117#define wlp_pset(type, type_code, name) \
118static size_t wlp_set_##name(struct wlp_attr_##name *attr, type value) \
119{ \
120 d_fnstart(6, NULL, "(attribute %p)\n", attr); \
121 wlp_set_attr_hdr(&attr->hdr, type_code, \
122 sizeof(*attr) - sizeof(struct wlp_attr_hdr)); \
123 attr->name = *value; \
124 d_dump(6, NULL, attr, sizeof(*attr)); \
125 d_fnend(6, NULL, "(attribute %p)\n", attr); \
126 return sizeof(*attr); \
127}
128
129/**
130 * Populate fields of a variable attribute
131 *
132 * @returns: total size of attribute including size of new value
133 *
134 * Provided with a pointer to the memory area reserved for the
135 * attribute structure, the field is populated with the value. The
136 * reserved memory has to contain enough space for the value.
137 */
138#define wlp_vset(type, type_code, name) \
139static size_t wlp_set_##name(struct wlp_attr_##name *attr, type value, \
140 size_t len) \
141{ \
142 d_fnstart(6, NULL, "(attribute %p)\n", attr); \
143 wlp_set_attr_hdr(&attr->hdr, type_code, len); \
144 memcpy(attr->name, value, len); \
145 d_dump(6, NULL, attr, sizeof(*attr) + len); \
146 d_fnend(6, NULL, "(attribute %p)\n", attr); \
147 return sizeof(*attr) + len; \
148}
149
/* Instantiate the per-attribute setters used by the message builders. */
wlp_vset(char *, WLP_ATTR_DEV_NAME, dev_name)
wlp_vset(char *, WLP_ATTR_MANUF, manufacturer)
wlp_set(enum wlp_assoc_type, WLP_ATTR_MSG_TYPE, msg_type)
wlp_vset(char *, WLP_ATTR_MODEL_NAME, model_name)
wlp_vset(char *, WLP_ATTR_MODEL_NR, model_nr)
wlp_vset(char *, WLP_ATTR_SERIAL, serial)
wlp_vset(char *, WLP_ATTR_WSS_NAME, wss_name)
wlp_pset(struct wlp_uuid *, WLP_ATTR_UUID_E, uuid_e)
wlp_pset(struct wlp_uuid *, WLP_ATTR_UUID_R, uuid_r)
wlp_pset(struct wlp_uuid *, WLP_ATTR_WSSID, wssid)
wlp_pset(struct wlp_dev_type *, WLP_ATTR_PRI_DEV_TYPE, prim_dev_type)
/*wlp_pset(struct wlp_dev_type *, WLP_ATTR_SEC_DEV_TYPE, sec_dev_type)*/
wlp_set(u8, WLP_ATTR_WLP_VER, version)
wlp_set(enum wlp_assc_error, WLP_ATTR_WLP_ASSC_ERR, wlp_assc_err)
wlp_set(enum wlp_wss_sel_mthd, WLP_ATTR_WSS_SEL_MTHD, wss_sel_mthd)
wlp_set(u8, WLP_ATTR_ACC_ENRL, accept_enrl)
wlp_set(u8, WLP_ATTR_WSS_SEC_STAT, wss_sec_status)
wlp_pset(struct uwb_mac_addr *, WLP_ATTR_WSS_BCAST, wss_bcast)
wlp_pset(struct wlp_nonce *, WLP_ATTR_ENRL_NONCE, enonce)
wlp_pset(struct wlp_nonce *, WLP_ATTR_REG_NONCE, rnonce)
wlp_set(u8, WLP_ATTR_WSS_TAG, wss_tag)
wlp_pset(struct uwb_mac_addr *, WLP_ATTR_WSS_VIRT, wss_virt)
172
173/**
174 * Fill in the WSS information attributes
175 *
176 * We currently only support one WSS, and this is assumed in this function
177 * that can populate only one WSS information attribute.
178 */
179static size_t wlp_set_wss_info(struct wlp_attr_wss_info *attr,
180 struct wlp_wss *wss)
181{
182 size_t datalen;
183 void *ptr = attr->wss_info;
184 size_t used = sizeof(*attr);
185 d_fnstart(6, NULL, "(attribute %p)\n", attr);
186 datalen = sizeof(struct wlp_wss_info) + strlen(wss->name);
187 wlp_set_attr_hdr(&attr->hdr, WLP_ATTR_WSS_INFO, datalen);
188 used = wlp_set_wssid(ptr, &wss->wssid);
189 used += wlp_set_wss_name(ptr + used, wss->name, strlen(wss->name));
190 used += wlp_set_accept_enrl(ptr + used, wss->accept_enroll);
191 used += wlp_set_wss_sec_status(ptr + used, wss->secure_status);
192 used += wlp_set_wss_bcast(ptr + used, &wss->bcast);
193 d_dump(6, NULL, attr, sizeof(*attr) + datalen);
194 d_fnend(6, NULL, "(attribute %p, used %d)\n",
195 attr, (int)(sizeof(*attr) + used));
196 return sizeof(*attr) + used;
197}
198
199/**
200 * Verify attribute header
201 *
202 * @hdr: Pointer to attribute header that will be verified.
203 * @type: Expected attribute type.
204 * @len: Expected length of attribute value (excluding header).
205 *
206 * Most attribute values have a known length even when they do have a
207 * length field. This knowledge can be used via this function to verify
208 * that the length field matches the expected value.
209 */
210static int wlp_check_attr_hdr(struct wlp *wlp, struct wlp_attr_hdr *hdr,
211 enum wlp_attr_type type, unsigned len)
212{
213 struct device *dev = &wlp->rc->uwb_dev.dev;
214
215 if (le16_to_cpu(hdr->type) != type) {
216 dev_err(dev, "WLP: unexpected header type. Expected "
217 "%u, got %u.\n", type, le16_to_cpu(hdr->type));
218 return -EINVAL;
219 }
220 if (le16_to_cpu(hdr->length) != len) {
221 dev_err(dev, "WLP: unexpected length in header. Expected "
222 "%u, got %u.\n", len, le16_to_cpu(hdr->length));
223 return -EINVAL;
224 }
225 return 0;
226}
227
228/**
229 * Check if header of WSS information attribute valid
230 *
231 * @returns: length of WSS attributes (value of length attribute field) if
232 * valid WSS information attribute found
233 * -ENODATA if no WSS information attribute found
234 * -EIO other error occured
235 *
236 * The WSS information attribute is optional. The function will be provided
237 * with a pointer to data that could _potentially_ be a WSS information
238 * attribute. If a valid WSS information attribute is found it will return
239 * 0, if no WSS information attribute is found it will return -ENODATA, and
240 * another error will be returned if it is a WSS information attribute, but
241 * some parsing failure occured.
242 */
243static int wlp_check_wss_info_attr_hdr(struct wlp *wlp,
244 struct wlp_attr_hdr *hdr, size_t buflen)
245{
246 struct device *dev = &wlp->rc->uwb_dev.dev;
247 size_t len;
248 int result = 0;
249
250 if (buflen < sizeof(*hdr)) {
251 dev_err(dev, "WLP: Not enough space in buffer to parse"
252 " WSS information attribute header.\n");
253 result = -EIO;
254 goto out;
255 }
256 if (le16_to_cpu(hdr->type) != WLP_ATTR_WSS_INFO) {
257 /* WSS information is optional */
258 result = -ENODATA;
259 goto out;
260 }
261 len = le16_to_cpu(hdr->length);
262 if (buflen < sizeof(*hdr) + len) {
263 dev_err(dev, "WLP: Not enough space in buffer to parse "
264 "variable data. Got %d, expected %d.\n",
265 (int)buflen, (int)(sizeof(*hdr) + len));
266 result = -EIO;
267 goto out;
268 }
269 result = len;
270out:
271 return result;
272}
273
274
275/**
276 * Get value of attribute from fixed size attribute field.
277 *
278 * @attr: Pointer to attribute field.
279 * @value: Pointer to variable in which attribute value will be placed.
280 * @buflen: Size of buffer in which attribute field (including header)
281 * can be found.
282 * @returns: Amount of given buffer consumed by parsing for this attribute.
283 *
284 * The size and type of the value is known by the type of the attribute.
285 */
286#define wlp_get(type, type_code, name) \
287ssize_t wlp_get_##name(struct wlp *wlp, struct wlp_attr_##name *attr, \
288 type *value, ssize_t buflen) \
289{ \
290 struct device *dev = &wlp->rc->uwb_dev.dev; \
291 if (buflen < 0) \
292 return -EINVAL; \
293 if (buflen < sizeof(*attr)) { \
294 dev_err(dev, "WLP: Not enough space in buffer to parse" \
295 " attribute field. Need %d, received %zu\n", \
296 (int)sizeof(*attr), buflen); \
297 return -EIO; \
298 } \
299 if (wlp_check_attr_hdr(wlp, &attr->hdr, type_code, \
300 sizeof(attr->name)) < 0) { \
301 dev_err(dev, "WLP: Header verification failed. \n"); \
302 return -EINVAL; \
303 } \
304 *value = attr->name; \
305 return sizeof(*attr); \
306}
307
/* Same as wlp_get(), but the generated accessor has static (file-local)
 * linkage; used for attributes that are only parsed within this file. */
#define wlp_get_sparse(type, type_code, name) \
	static wlp_get(type, type_code, name)
310
311/**
312 * Get value of attribute from variable sized attribute field.
313 *
314 * @max: The maximum size of this attribute. This value is dictated by
315 * the maximum value from the WLP specification.
316 *
317 * @attr: Pointer to attribute field.
318 * @value: Pointer to variable that will contain the value. The memory
319 * must already have been allocated for this value.
320 * @buflen: Size of buffer in which attribute field (including header)
321 * can be found.
322 * @returns: Amount of given bufferconsumed by parsing for this attribute.
323 */
324#define wlp_vget(type_val, type_code, name, max) \
325static ssize_t wlp_get_##name(struct wlp *wlp, \
326 struct wlp_attr_##name *attr, \
327 type_val *value, ssize_t buflen) \
328{ \
329 struct device *dev = &wlp->rc->uwb_dev.dev; \
330 size_t len; \
331 if (buflen < 0) \
332 return -EINVAL; \
333 if (buflen < sizeof(*attr)) { \
334 dev_err(dev, "WLP: Not enough space in buffer to parse" \
335 " header.\n"); \
336 return -EIO; \
337 } \
338 if (le16_to_cpu(attr->hdr.type) != type_code) { \
339 dev_err(dev, "WLP: Unexpected attribute type. Got %u, " \
340 "expected %u.\n", le16_to_cpu(attr->hdr.type), \
341 type_code); \
342 return -EINVAL; \
343 } \
344 len = le16_to_cpu(attr->hdr.length); \
345 if (len > max) { \
346 dev_err(dev, "WLP: Attribute larger than maximum " \
347 "allowed. Received %zu, max is %d.\n", len, \
348 (int)max); \
349 return -EFBIG; \
350 } \
351 if (buflen < sizeof(*attr) + len) { \
352 dev_err(dev, "WLP: Not enough space in buffer to parse "\
353 "variable data.\n"); \
354 return -EIO; \
355 } \
356 memcpy(value, (void *) attr + sizeof(*attr), len); \
357 return sizeof(*attr) + len; \
358}
359
/* Instantiate the fixed-size attribute accessors. The wlp_get() ones
 * (version, wssid) have external linkage for use outside this file; the
 * wlp_get_sparse() ones are static helpers used only here. */
wlp_get(u8, WLP_ATTR_WLP_VER, version)
wlp_get_sparse(enum wlp_wss_sel_mthd, WLP_ATTR_WSS_SEL_MTHD, wss_sel_mthd)
wlp_get_sparse(struct wlp_dev_type, WLP_ATTR_PRI_DEV_TYPE, prim_dev_type)
wlp_get_sparse(enum wlp_assc_error, WLP_ATTR_WLP_ASSC_ERR, wlp_assc_err)
wlp_get_sparse(struct wlp_uuid, WLP_ATTR_UUID_E, uuid_e)
wlp_get_sparse(struct wlp_uuid, WLP_ATTR_UUID_R, uuid_r)
wlp_get(struct wlp_uuid, WLP_ATTR_WSSID, wssid)
wlp_get_sparse(u8, WLP_ATTR_ACC_ENRL, accept_enrl)
wlp_get_sparse(u8, WLP_ATTR_WSS_SEC_STAT, wss_sec_status)
wlp_get_sparse(struct uwb_mac_addr, WLP_ATTR_WSS_BCAST, wss_bcast)
wlp_get_sparse(u8, WLP_ATTR_WSS_TAG, wss_tag)
wlp_get_sparse(struct uwb_mac_addr, WLP_ATTR_WSS_VIRT, wss_virt)
wlp_get_sparse(struct wlp_nonce, WLP_ATTR_ENRL_NONCE, enonce)
wlp_get_sparse(struct wlp_nonce, WLP_ATTR_REG_NONCE, rnonce)
374
/* The buffers for the device info attributes can be found in the
 * wlp_device_info struct. These buffers contain one byte more than the
 * max allowed by the spec - this is done to be able to add the
 * terminating \0 for user display. This terminating byte is not required
 * in the actual attribute field (because it has a length field) so the
 * maximum allowed for this value is one less than its size in the
 * structure. The same convention applies to the WSS name buffer in
 * struct wlp_wss. */
wlp_vget(char, WLP_ATTR_WSS_NAME, wss_name,
	 FIELD_SIZEOF(struct wlp_wss, name) - 1)
wlp_vget(char, WLP_ATTR_DEV_NAME, dev_name,
	 FIELD_SIZEOF(struct wlp_device_info, name) - 1)
wlp_vget(char, WLP_ATTR_MANUF, manufacturer,
	 FIELD_SIZEOF(struct wlp_device_info, manufacturer) - 1)
wlp_vget(char, WLP_ATTR_MODEL_NAME, model_name,
	 FIELD_SIZEOF(struct wlp_device_info, model_name) - 1)
wlp_vget(char, WLP_ATTR_MODEL_NR, model_nr,
	 FIELD_SIZEOF(struct wlp_device_info, model_nr) - 1)
wlp_vget(char, WLP_ATTR_SERIAL, serial,
	 FIELD_SIZEOF(struct wlp_device_info, serial) - 1)
395
396/**
397 * Retrieve WSS Name, Accept enroll, Secure status, Broadcast from WSS info
398 *
399 * @attr: pointer to WSS name attribute in WSS information attribute field
400 * @info: structure that will be populated with data from WSS information
401 * field (WSS name, Accept enroll, secure status, broadcast address)
402 * @buflen: size of buffer
403 *
404 * Although the WSSID attribute forms part of the WSS info attribute it is
405 * retrieved separately and stored in a different location.
406 */
407static ssize_t wlp_get_wss_info_attrs(struct wlp *wlp,
408 struct wlp_attr_hdr *attr,
409 struct wlp_wss_tmp_info *info,
410 ssize_t buflen)
411{
412 struct device *dev = &wlp->rc->uwb_dev.dev;
413 void *ptr = attr;
414 size_t used = 0;
415 ssize_t result = -EINVAL;
416
417 d_printf(6, dev, "WLP: WSS info: Retrieving WSS name\n");
418 result = wlp_get_wss_name(wlp, ptr, info->name, buflen);
419 if (result < 0) {
420 dev_err(dev, "WLP: unable to obtain WSS name from "
421 "WSS info in D2 message.\n");
422 goto error_parse;
423 }
424 used += result;
425 d_printf(6, dev, "WLP: WSS info: Retrieving accept enroll\n");
426 result = wlp_get_accept_enrl(wlp, ptr + used, &info->accept_enroll,
427 buflen - used);
428 if (result < 0) {
429 dev_err(dev, "WLP: unable to obtain accepting "
430 "enrollment from WSS info in D2 message.\n");
431 goto error_parse;
432 }
433 if (info->accept_enroll != 0 && info->accept_enroll != 1) {
434 dev_err(dev, "WLP: invalid value for accepting "
435 "enrollment in D2 message.\n");
436 result = -EINVAL;
437 goto error_parse;
438 }
439 used += result;
440 d_printf(6, dev, "WLP: WSS info: Retrieving secure status\n");
441 result = wlp_get_wss_sec_status(wlp, ptr + used, &info->sec_status,
442 buflen - used);
443 if (result < 0) {
444 dev_err(dev, "WLP: unable to obtain secure "
445 "status from WSS info in D2 message.\n");
446 goto error_parse;
447 }
448 if (info->sec_status != 0 && info->sec_status != 1) {
449 dev_err(dev, "WLP: invalid value for secure "
450 "status in D2 message.\n");
451 result = -EINVAL;
452 goto error_parse;
453 }
454 used += result;
455 d_printf(6, dev, "WLP: WSS info: Retrieving broadcast\n");
456 result = wlp_get_wss_bcast(wlp, ptr + used, &info->bcast,
457 buflen - used);
458 if (result < 0) {
459 dev_err(dev, "WLP: unable to obtain broadcast "
460 "address from WSS info in D2 message.\n");
461 goto error_parse;
462 }
463 used += result;
464 result = used;
465error_parse:
466 return result;
467}
468
469/**
470 * Create a new WSSID entry for the neighbor, allocate temporary storage
471 *
472 * Each neighbor can have many WSS active. We maintain a list of WSSIDs
473 * advertised by neighbor. During discovery we also cache information about
474 * these WSS in temporary storage.
475 *
476 * The temporary storage will be removed after it has been used (eg.
477 * displayed to user), the wssid element will be removed from the list when
478 * the neighbor is rediscovered or when it disappears.
479 */
480static struct wlp_wssid_e *wlp_create_wssid_e(struct wlp *wlp,
481 struct wlp_neighbor_e *neighbor)
482{
483 struct device *dev = &wlp->rc->uwb_dev.dev;
484 struct wlp_wssid_e *wssid_e;
485
486 wssid_e = kzalloc(sizeof(*wssid_e), GFP_KERNEL);
487 if (wssid_e == NULL) {
488 dev_err(dev, "WLP: unable to allocate memory "
489 "for WSS information.\n");
490 goto error_alloc;
491 }
492 wssid_e->info = kzalloc(sizeof(struct wlp_wss_tmp_info), GFP_KERNEL);
493 if (wssid_e->info == NULL) {
494 dev_err(dev, "WLP: unable to allocate memory "
495 "for temporary WSS information.\n");
496 kfree(wssid_e);
497 wssid_e = NULL;
498 goto error_alloc;
499 }
500 list_add(&wssid_e->node, &neighbor->wssid);
501error_alloc:
502 return wssid_e;
503}
504
505/**
506 * Parse WSS information attribute
507 *
508 * @attr: pointer to WSS information attribute header
509 * @buflen: size of buffer in which WSS information attribute appears
510 * @wssid: will place wssid from WSS info attribute in this location
511 * @wss_info: will place other information from WSS information attribute
512 * in this location
513 *
514 * memory for @wssid and @wss_info must be allocated when calling this
515 */
516static ssize_t wlp_get_wss_info(struct wlp *wlp, struct wlp_attr_wss_info *attr,
517 size_t buflen, struct wlp_uuid *wssid,
518 struct wlp_wss_tmp_info *wss_info)
519{
520 struct device *dev = &wlp->rc->uwb_dev.dev;
521 ssize_t result;
522 size_t len;
523 size_t used = 0;
524 void *ptr;
525
526 result = wlp_check_wss_info_attr_hdr(wlp, (struct wlp_attr_hdr *)attr,
527 buflen);
528 if (result < 0)
529 goto out;
530 len = result;
531 used = sizeof(*attr);
532 ptr = attr;
533 d_printf(6, dev, "WLP: WSS info: Retrieving WSSID\n");
534 result = wlp_get_wssid(wlp, ptr + used, wssid, buflen - used);
535 if (result < 0) {
536 dev_err(dev, "WLP: unable to obtain WSSID from WSS info.\n");
537 goto out;
538 }
539 used += result;
540 result = wlp_get_wss_info_attrs(wlp, ptr + used, wss_info,
541 buflen - used);
542 if (result < 0) {
543 dev_err(dev, "WLP: unable to obtain WSS information "
544 "from WSS information attributes. \n");
545 goto out;
546 }
547 used += result;
548 if (len + sizeof(*attr) != used) {
549 dev_err(dev, "WLP: Amount of data parsed does not "
550 "match length field. Parsed %zu, length "
551 "field %zu. \n", used, len);
552 result = -EINVAL;
553 goto out;
554 }
555 result = used;
556 d_printf(6, dev, "WLP: Successfully parsed WLP information "
557 "attribute. used %zu bytes\n", used);
558out:
559 return result;
560}
561
562/**
563 * Retrieve WSS info from association frame
564 *
565 * @attr: pointer to WSS information attribute
566 * @neighbor: ptr to neighbor being discovered, NULL if enrollment in
567 * progress
568 * @wss: ptr to WSS being enrolled in, NULL if discovery in progress
569 * @buflen: size of buffer in which WSS information appears
570 *
571 * The WSS information attribute appears in the D2 association message.
572 * This message is used in two ways: to discover all neighbors or to enroll
573 * into a WSS activated by a neighbor. During discovery we only want to
574 * store the WSS info in a cache, to be deleted right after it has been
575 * used (eg. displayed to the user). During enrollment we store the WSS
576 * information for the lifetime of enrollment.
577 *
578 * During discovery we are interested in all WSS information, during
579 * enrollment we are only interested in the WSS being enrolled in. Even so,
580 * when in enrollment we keep parsing the message after finding the WSS of
581 * interest, this simplifies the calling routine in that it can be sure
582 * that all WSS information attributes have been parsed out of the message.
583 *
584 * Association frame is process with nbmutex held. The list access is safe.
585 */
586static ssize_t wlp_get_all_wss_info(struct wlp *wlp,
587 struct wlp_attr_wss_info *attr,
588 struct wlp_neighbor_e *neighbor,
589 struct wlp_wss *wss, ssize_t buflen)
590{
591 struct device *dev = &wlp->rc->uwb_dev.dev;
592 size_t used = 0;
593 ssize_t result = -EINVAL;
594 struct wlp_attr_wss_info *cur;
595 struct wlp_uuid wssid;
596 struct wlp_wss_tmp_info wss_info;
597 unsigned enroll; /* 0 - discovery to cache, 1 - enrollment */
598 struct wlp_wssid_e *wssid_e;
599 char buf[WLP_WSS_UUID_STRSIZE];
600
601 d_fnstart(6, dev, "wlp %p, attr %p, neighbor %p, wss %p, buflen %d \n",
602 wlp, attr, neighbor, wss, (int)buflen);
603 if (buflen < 0)
604 goto out;
605
606 if (neighbor != NULL && wss == NULL)
607 enroll = 0; /* discovery */
608 else if (wss != NULL && neighbor == NULL)
609 enroll = 1; /* enrollment */
610 else
611 goto out;
612
613 cur = attr;
614 while (buflen - used > 0) {
615 memset(&wss_info, 0, sizeof(wss_info));
616 cur = (void *)cur + used;
617 result = wlp_get_wss_info(wlp, cur, buflen - used, &wssid,
618 &wss_info);
619 if (result == -ENODATA) {
620 result = used;
621 goto out;
622 } else if (result < 0) {
623 dev_err(dev, "WLP: Unable to parse WSS information "
624 "from WSS information attribute. \n");
625 result = -EINVAL;
626 goto error_parse;
627 }
628 if (enroll && !memcmp(&wssid, &wss->wssid, sizeof(wssid))) {
629 if (wss_info.accept_enroll != 1) {
630 dev_err(dev, "WLP: Requested WSS does "
631 "not accept enrollment.\n");
632 result = -EINVAL;
633 goto out;
634 }
635 memcpy(wss->name, wss_info.name, sizeof(wss->name));
636 wss->bcast = wss_info.bcast;
637 wss->secure_status = wss_info.sec_status;
638 wss->accept_enroll = wss_info.accept_enroll;
639 wss->state = WLP_WSS_STATE_PART_ENROLLED;
640 wlp_wss_uuid_print(buf, sizeof(buf), &wssid);
641 d_printf(2, dev, "WLP: Found WSS %s. Enrolling.\n",
642 buf);
643 } else {
644 wssid_e = wlp_create_wssid_e(wlp, neighbor);
645 if (wssid_e == NULL) {
646 dev_err(dev, "WLP: Cannot create new WSSID "
647 "entry for neighbor %02x:%02x.\n",
648 neighbor->uwb_dev->dev_addr.data[1],
649 neighbor->uwb_dev->dev_addr.data[0]);
650 result = -ENOMEM;
651 goto out;
652 }
653 wssid_e->wssid = wssid;
654 *wssid_e->info = wss_info;
655 }
656 used += result;
657 }
658 result = used;
659error_parse:
660 if (result < 0 && !enroll) /* this was a discovery */
661 wlp_remove_neighbor_tmp_info(neighbor);
662out:
663 d_fnend(6, dev, "wlp %p, attr %p, neighbor %p, wss %p, buflen %d, "
664 "result %d \n", wlp, attr, neighbor, wss, (int)buflen,
665 (int)result);
666 return result;
667
668}
669
670/**
671 * Parse WSS information attributes into cache for discovery
672 *
673 * @attr: the first WSS information attribute in message
674 * @neighbor: the neighbor whose cache will be populated
675 * @buflen: size of the input buffer
676 */
677static ssize_t wlp_get_wss_info_to_cache(struct wlp *wlp,
678 struct wlp_attr_wss_info *attr,
679 struct wlp_neighbor_e *neighbor,
680 ssize_t buflen)
681{
682 return wlp_get_all_wss_info(wlp, attr, neighbor, NULL, buflen);
683}
684
685/**
686 * Parse WSS information attributes into WSS struct for enrollment
687 *
688 * @attr: the first WSS information attribute in message
689 * @wss: the WSS that will be enrolled
690 * @buflen: size of the input buffer
691 */
692static ssize_t wlp_get_wss_info_to_enroll(struct wlp *wlp,
693 struct wlp_attr_wss_info *attr,
694 struct wlp_wss *wss, ssize_t buflen)
695{
696 return wlp_get_all_wss_info(wlp, attr, NULL, wss, buflen);
697}
698
699/**
700 * Construct a D1 association frame
701 *
702 * We use the radio control functions to determine the values of the device
703 * properties. These are of variable length and the total space needed is
704 * tallied first before we start constructing the message. The radio
705 * control functions return strings that are terminated with \0. This
706 * character should not be included in the message (there is a length field
707 * accompanying it in the attribute).
708 */
709static int wlp_build_assoc_d1(struct wlp *wlp, struct wlp_wss *wss,
710 struct sk_buff **skb)
711{
712
713 struct device *dev = &wlp->rc->uwb_dev.dev;
714 int result = 0;
715 struct wlp_device_info *info;
716 size_t used = 0;
717 struct wlp_frame_assoc *_d1;
718 struct sk_buff *_skb;
719 void *d1_itr;
720
721 d_fnstart(6, dev, "wlp %p\n", wlp);
722 if (wlp->dev_info == NULL) {
723 result = __wlp_setup_device_info(wlp);
724 if (result < 0) {
725 dev_err(dev, "WLP: Unable to setup device "
726 "information for D1 message.\n");
727 goto error;
728 }
729 }
730 info = wlp->dev_info;
731 d_printf(6, dev, "Local properties:\n"
732 "Device name (%d bytes): %s\n"
733 "Model name (%d bytes): %s\n"
734 "Manufacturer (%d bytes): %s\n"
735 "Model number (%d bytes): %s\n"
736 "Serial number (%d bytes): %s\n"
737 "Primary device type: \n"
738 " Category: %d \n"
739 " OUI: %02x:%02x:%02x \n"
740 " OUI Subdivision: %u \n",
741 (int)strlen(info->name), info->name,
742 (int)strlen(info->model_name), info->model_name,
743 (int)strlen(info->manufacturer), info->manufacturer,
744 (int)strlen(info->model_nr), info->model_nr,
745 (int)strlen(info->serial), info->serial,
746 info->prim_dev_type.category,
747 info->prim_dev_type.OUI[0], info->prim_dev_type.OUI[1],
748 info->prim_dev_type.OUI[2], info->prim_dev_type.OUIsubdiv);
749 _skb = dev_alloc_skb(sizeof(*_d1)
750 + sizeof(struct wlp_attr_uuid_e)
751 + sizeof(struct wlp_attr_wss_sel_mthd)
752 + sizeof(struct wlp_attr_dev_name)
753 + strlen(info->name)
754 + sizeof(struct wlp_attr_manufacturer)
755 + strlen(info->manufacturer)
756 + sizeof(struct wlp_attr_model_name)
757 + strlen(info->model_name)
758 + sizeof(struct wlp_attr_model_nr)
759 + strlen(info->model_nr)
760 + sizeof(struct wlp_attr_serial)
761 + strlen(info->serial)
762 + sizeof(struct wlp_attr_prim_dev_type)
763 + sizeof(struct wlp_attr_wlp_assc_err));
764 if (_skb == NULL) {
765 dev_err(dev, "WLP: Cannot allocate memory for association "
766 "message.\n");
767 result = -ENOMEM;
768 goto error;
769 }
770 _d1 = (void *) _skb->data;
771 d_printf(6, dev, "D1 starts at %p \n", _d1);
772 _d1->hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID);
773 _d1->hdr.type = WLP_FRAME_ASSOCIATION;
774 _d1->type = WLP_ASSOC_D1;
775
776 wlp_set_version(&_d1->version, WLP_VERSION);
777 wlp_set_msg_type(&_d1->msg_type, WLP_ASSOC_D1);
778 d1_itr = _d1->attr;
779 used = wlp_set_uuid_e(d1_itr, &wlp->uuid);
780 used += wlp_set_wss_sel_mthd(d1_itr + used, WLP_WSS_REG_SELECT);
781 used += wlp_set_dev_name(d1_itr + used, info->name,
782 strlen(info->name));
783 used += wlp_set_manufacturer(d1_itr + used, info->manufacturer,
784 strlen(info->manufacturer));
785 used += wlp_set_model_name(d1_itr + used, info->model_name,
786 strlen(info->model_name));
787 used += wlp_set_model_nr(d1_itr + used, info->model_nr,
788 strlen(info->model_nr));
789 used += wlp_set_serial(d1_itr + used, info->serial,
790 strlen(info->serial));
791 used += wlp_set_prim_dev_type(d1_itr + used, &info->prim_dev_type);
792 used += wlp_set_wlp_assc_err(d1_itr + used, WLP_ASSOC_ERROR_NONE);
793 skb_put(_skb, sizeof(*_d1) + used);
794 d_printf(6, dev, "D1 message:\n");
795 d_dump(6, dev, _d1, sizeof(*_d1)
796 + sizeof(struct wlp_attr_uuid_e)
797 + sizeof(struct wlp_attr_wss_sel_mthd)
798 + sizeof(struct wlp_attr_dev_name)
799 + strlen(info->name)
800 + sizeof(struct wlp_attr_manufacturer)
801 + strlen(info->manufacturer)
802 + sizeof(struct wlp_attr_model_name)
803 + strlen(info->model_name)
804 + sizeof(struct wlp_attr_model_nr)
805 + strlen(info->model_nr)
806 + sizeof(struct wlp_attr_serial)
807 + strlen(info->serial)
808 + sizeof(struct wlp_attr_prim_dev_type)
809 + sizeof(struct wlp_attr_wlp_assc_err));
810 *skb = _skb;
811error:
812 d_fnend(6, dev, "wlp %p, result = %d\n", wlp, result);
813 return result;
814}
815
816/**
817 * Construct a D2 association frame
818 *
819 * We use the radio control functions to determine the values of the device
820 * properties. These are of variable length and the total space needed is
821 * tallied first before we start constructing the message. The radio
822 * control functions return strings that are terminated with \0. This
823 * character should not be included in the message (there is a length field
824 * accompanying it in the attribute).
825 */
826static
827int wlp_build_assoc_d2(struct wlp *wlp, struct wlp_wss *wss,
828 struct sk_buff **skb, struct wlp_uuid *uuid_e)
829{
830
831 struct device *dev = &wlp->rc->uwb_dev.dev;
832 int result = 0;
833 struct wlp_device_info *info;
834 size_t used = 0;
835 struct wlp_frame_assoc *_d2;
836 struct sk_buff *_skb;
837 void *d2_itr;
838 size_t mem_needed;
839
840 d_fnstart(6, dev, "wlp %p\n", wlp);
841 if (wlp->dev_info == NULL) {
842 result = __wlp_setup_device_info(wlp);
843 if (result < 0) {
844 dev_err(dev, "WLP: Unable to setup device "
845 "information for D2 message.\n");
846 goto error;
847 }
848 }
849 info = wlp->dev_info;
850 d_printf(6, dev, "Local properties:\n"
851 "Device name (%d bytes): %s\n"
852 "Model name (%d bytes): %s\n"
853 "Manufacturer (%d bytes): %s\n"
854 "Model number (%d bytes): %s\n"
855 "Serial number (%d bytes): %s\n"
856 "Primary device type: \n"
857 " Category: %d \n"
858 " OUI: %02x:%02x:%02x \n"
859 " OUI Subdivision: %u \n",
860 (int)strlen(info->name), info->name,
861 (int)strlen(info->model_name), info->model_name,
862 (int)strlen(info->manufacturer), info->manufacturer,
863 (int)strlen(info->model_nr), info->model_nr,
864 (int)strlen(info->serial), info->serial,
865 info->prim_dev_type.category,
866 info->prim_dev_type.OUI[0], info->prim_dev_type.OUI[1],
867 info->prim_dev_type.OUI[2], info->prim_dev_type.OUIsubdiv);
868 mem_needed = sizeof(*_d2)
869 + sizeof(struct wlp_attr_uuid_e)
870 + sizeof(struct wlp_attr_uuid_r)
871 + sizeof(struct wlp_attr_dev_name)
872 + strlen(info->name)
873 + sizeof(struct wlp_attr_manufacturer)
874 + strlen(info->manufacturer)
875 + sizeof(struct wlp_attr_model_name)
876 + strlen(info->model_name)
877 + sizeof(struct wlp_attr_model_nr)
878 + strlen(info->model_nr)
879 + sizeof(struct wlp_attr_serial)
880 + strlen(info->serial)
881 + sizeof(struct wlp_attr_prim_dev_type)
882 + sizeof(struct wlp_attr_wlp_assc_err);
883 if (wlp->wss.state >= WLP_WSS_STATE_ACTIVE)
884 mem_needed += sizeof(struct wlp_attr_wss_info)
885 + sizeof(struct wlp_wss_info)
886 + strlen(wlp->wss.name);
887 _skb = dev_alloc_skb(mem_needed);
888 if (_skb == NULL) {
889 dev_err(dev, "WLP: Cannot allocate memory for association "
890 "message.\n");
891 result = -ENOMEM;
892 goto error;
893 }
894 _d2 = (void *) _skb->data;
895 d_printf(6, dev, "D2 starts at %p \n", _d2);
896 _d2->hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID);
897 _d2->hdr.type = WLP_FRAME_ASSOCIATION;
898 _d2->type = WLP_ASSOC_D2;
899
900 wlp_set_version(&_d2->version, WLP_VERSION);
901 wlp_set_msg_type(&_d2->msg_type, WLP_ASSOC_D2);
902 d2_itr = _d2->attr;
903 used = wlp_set_uuid_e(d2_itr, uuid_e);
904 used += wlp_set_uuid_r(d2_itr + used, &wlp->uuid);
905 if (wlp->wss.state >= WLP_WSS_STATE_ACTIVE)
906 used += wlp_set_wss_info(d2_itr + used, &wlp->wss);
907 used += wlp_set_dev_name(d2_itr + used, info->name,
908 strlen(info->name));
909 used += wlp_set_manufacturer(d2_itr + used, info->manufacturer,
910 strlen(info->manufacturer));
911 used += wlp_set_model_name(d2_itr + used, info->model_name,
912 strlen(info->model_name));
913 used += wlp_set_model_nr(d2_itr + used, info->model_nr,
914 strlen(info->model_nr));
915 used += wlp_set_serial(d2_itr + used, info->serial,
916 strlen(info->serial));
917 used += wlp_set_prim_dev_type(d2_itr + used, &info->prim_dev_type);
918 used += wlp_set_wlp_assc_err(d2_itr + used, WLP_ASSOC_ERROR_NONE);
919 skb_put(_skb, sizeof(*_d2) + used);
920 d_printf(6, dev, "D2 message:\n");
921 d_dump(6, dev, _d2, mem_needed);
922 *skb = _skb;
923error:
924 d_fnend(6, dev, "wlp %p, result = %d\n", wlp, result);
925 return result;
926}
927
928/**
929 * Allocate memory for and populate fields of F0 association frame
930 *
931 * Currently (while focusing on unsecure enrollment) we ignore the
932 * nonce's that could be placed in the message. Only the error field is
933 * populated by the value provided by the caller.
934 */
935static
936int wlp_build_assoc_f0(struct wlp *wlp, struct sk_buff **skb,
937 enum wlp_assc_error error)
938{
939 struct device *dev = &wlp->rc->uwb_dev.dev;
940 int result = -ENOMEM;
941 struct {
942 struct wlp_frame_assoc f0_hdr;
943 struct wlp_attr_enonce enonce;
944 struct wlp_attr_rnonce rnonce;
945 struct wlp_attr_wlp_assc_err assc_err;
946 } *f0;
947 struct sk_buff *_skb;
948 struct wlp_nonce tmp;
949
950 d_fnstart(6, dev, "wlp %p\n", wlp);
951 _skb = dev_alloc_skb(sizeof(*f0));
952 if (_skb == NULL) {
953 dev_err(dev, "WLP: Unable to allocate memory for F0 "
954 "association frame. \n");
955 goto error_alloc;
956 }
957 f0 = (void *) _skb->data;
958 d_printf(6, dev, "F0 starts at %p \n", f0);
959 f0->f0_hdr.hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID);
960 f0->f0_hdr.hdr.type = WLP_FRAME_ASSOCIATION;
961 f0->f0_hdr.type = WLP_ASSOC_F0;
962 wlp_set_version(&f0->f0_hdr.version, WLP_VERSION);
963 wlp_set_msg_type(&f0->f0_hdr.msg_type, WLP_ASSOC_F0);
964 memset(&tmp, 0, sizeof(tmp));
965 wlp_set_enonce(&f0->enonce, &tmp);
966 wlp_set_rnonce(&f0->rnonce, &tmp);
967 wlp_set_wlp_assc_err(&f0->assc_err, error);
968 skb_put(_skb, sizeof(*f0));
969 *skb = _skb;
970 result = 0;
971error_alloc:
972 d_fnend(6, dev, "wlp %p, result %d \n", wlp, result);
973 return result;
974}
975
976/**
977 * Parse F0 frame
978 *
979 * We just retrieve the values and print it as an error to the user.
980 * Calling function already knows an error occured (F0 indicates error), so
981 * we just parse the content as debug for higher layers.
982 */
983int wlp_parse_f0(struct wlp *wlp, struct sk_buff *skb)
984{
985 struct device *dev = &wlp->rc->uwb_dev.dev;
986 struct wlp_frame_assoc *f0 = (void *) skb->data;
987 void *ptr = skb->data;
988 size_t len = skb->len;
989 size_t used;
990 ssize_t result;
991 struct wlp_nonce enonce, rnonce;
992 enum wlp_assc_error assc_err;
993 char enonce_buf[WLP_WSS_NONCE_STRSIZE];
994 char rnonce_buf[WLP_WSS_NONCE_STRSIZE];
995
996 used = sizeof(*f0);
997 result = wlp_get_enonce(wlp, ptr + used, &enonce, len - used);
998 if (result < 0) {
999 dev_err(dev, "WLP: unable to obtain Enrollee nonce "
1000 "attribute from F0 message.\n");
1001 goto error_parse;
1002 }
1003 used += result;
1004 result = wlp_get_rnonce(wlp, ptr + used, &rnonce, len - used);
1005 if (result < 0) {
1006 dev_err(dev, "WLP: unable to obtain Registrar nonce "
1007 "attribute from F0 message.\n");
1008 goto error_parse;
1009 }
1010 used += result;
1011 result = wlp_get_wlp_assc_err(wlp, ptr + used, &assc_err, len - used);
1012 if (result < 0) {
1013 dev_err(dev, "WLP: unable to obtain WLP Association error "
1014 "attribute from F0 message.\n");
1015 goto error_parse;
1016 }
1017 wlp_wss_nonce_print(enonce_buf, sizeof(enonce_buf), &enonce);
1018 wlp_wss_nonce_print(rnonce_buf, sizeof(rnonce_buf), &rnonce);
1019 dev_err(dev, "WLP: Received F0 error frame from neighbor. Enrollee "
1020 "nonce: %s, Registrar nonce: %s, WLP Association error: %s.\n",
1021 enonce_buf, rnonce_buf, wlp_assc_error_str(assc_err));
1022 result = 0;
1023error_parse:
1024 return result;
1025}
1026
1027/**
1028 * Retrieve variable device information from association message
1029 *
1030 * The device information parsed is not required in any message. This
1031 * routine will thus not fail if an attribute is not present.
1032 * The attributes are expected in a certain order, even if all are not
1033 * present. The "attribute type" value is used to ensure the attributes
1034 * are parsed in the correct order.
1035 *
1036 * If an error is encountered during parsing the function will return an
1037 * error code, when this happens the given device_info structure may be
1038 * partially filled.
1039 */
1040static
1041int wlp_get_variable_info(struct wlp *wlp, void *data,
1042 struct wlp_device_info *dev_info, ssize_t len)
1043{
1044 struct device *dev = &wlp->rc->uwb_dev.dev;
1045 size_t used = 0;
1046 struct wlp_attr_hdr *hdr;
1047 ssize_t result = 0;
1048 unsigned last = 0;
1049
1050 while (len - used > 0) {
1051 if (len - used < sizeof(*hdr)) {
1052 dev_err(dev, "WLP: Partial data in frame, cannot "
1053 "parse. \n");
1054 goto error_parse;
1055 }
1056 hdr = data + used;
1057 switch (le16_to_cpu(hdr->type)) {
1058 case WLP_ATTR_MANUF:
1059 if (last >= WLP_ATTR_MANUF) {
1060 dev_err(dev, "WLP: Incorrect order of "
1061 "attribute values in D1 msg.\n");
1062 goto error_parse;
1063 }
1064 result = wlp_get_manufacturer(wlp, data + used,
1065 dev_info->manufacturer,
1066 len - used);
1067 if (result < 0) {
1068 dev_err(dev, "WLP: Unable to obtain "
1069 "Manufacturer attribute from D1 "
1070 "message.\n");
1071 goto error_parse;
1072 }
1073 last = WLP_ATTR_MANUF;
1074 used += result;
1075 break;
1076 case WLP_ATTR_MODEL_NAME:
1077 if (last >= WLP_ATTR_MODEL_NAME) {
1078 dev_err(dev, "WLP: Incorrect order of "
1079 "attribute values in D1 msg.\n");
1080 goto error_parse;
1081 }
1082 result = wlp_get_model_name(wlp, data + used,
1083 dev_info->model_name,
1084 len - used);
1085 if (result < 0) {
1086 dev_err(dev, "WLP: Unable to obtain Model "
1087 "name attribute from D1 message.\n");
1088 goto error_parse;
1089 }
1090 last = WLP_ATTR_MODEL_NAME;
1091 used += result;
1092 break;
1093 case WLP_ATTR_MODEL_NR:
1094 if (last >= WLP_ATTR_MODEL_NR) {
1095 dev_err(dev, "WLP: Incorrect order of "
1096 "attribute values in D1 msg.\n");
1097 goto error_parse;
1098 }
1099 result = wlp_get_model_nr(wlp, data + used,
1100 dev_info->model_nr,
1101 len - used);
1102 if (result < 0) {
1103 dev_err(dev, "WLP: Unable to obtain Model "
1104 "number attribute from D1 message.\n");
1105 goto error_parse;
1106 }
1107 last = WLP_ATTR_MODEL_NR;
1108 used += result;
1109 break;
1110 case WLP_ATTR_SERIAL:
1111 if (last >= WLP_ATTR_SERIAL) {
1112 dev_err(dev, "WLP: Incorrect order of "
1113 "attribute values in D1 msg.\n");
1114 goto error_parse;
1115 }
1116 result = wlp_get_serial(wlp, data + used,
1117 dev_info->serial, len - used);
1118 if (result < 0) {
1119 dev_err(dev, "WLP: Unable to obtain Serial "
1120 "number attribute from D1 message.\n");
1121 goto error_parse;
1122 }
1123 last = WLP_ATTR_SERIAL;
1124 used += result;
1125 break;
1126 case WLP_ATTR_PRI_DEV_TYPE:
1127 if (last >= WLP_ATTR_PRI_DEV_TYPE) {
1128 dev_err(dev, "WLP: Incorrect order of "
1129 "attribute values in D1 msg.\n");
1130 goto error_parse;
1131 }
1132 result = wlp_get_prim_dev_type(wlp, data + used,
1133 &dev_info->prim_dev_type,
1134 len - used);
1135 if (result < 0) {
1136 dev_err(dev, "WLP: Unable to obtain Primary "
1137 "device type attribute from D1 "
1138 "message.\n");
1139 goto error_parse;
1140 }
1141 dev_info->prim_dev_type.category =
1142 le16_to_cpu(dev_info->prim_dev_type.category);
1143 dev_info->prim_dev_type.subID =
1144 le16_to_cpu(dev_info->prim_dev_type.subID);
1145 last = WLP_ATTR_PRI_DEV_TYPE;
1146 used += result;
1147 break;
1148 default:
1149 /* This is not variable device information. */
1150 goto out;
1151 break;
1152 }
1153 }
1154out:
1155 return used;
1156error_parse:
1157 return -EINVAL;
1158}
1159
1160/**
1161 * Parse incoming D1 frame, populate attribute values
1162 *
1163 * Caller provides pointers to memory already allocated for attributes
1164 * expected in the D1 frame. These variables will be populated.
1165 */
static
int wlp_parse_d1_frame(struct wlp *wlp, struct sk_buff *skb,
		       struct wlp_uuid *uuid_e,
		       enum wlp_wss_sel_mthd *sel_mthd,
		       struct wlp_device_info *dev_info,
		       enum wlp_assc_error *assc_err)
{
	struct device *dev = &wlp->rc->uwb_dev.dev;
	struct wlp_frame_assoc *d1 = (void *) skb->data;
	void *ptr = skb->data;
	size_t len = skb->len;
	size_t used;	/* bytes of the frame consumed so far */
	ssize_t result;

	/*
	 * Attributes follow the fixed association frame header. Each
	 * wlp_get_* helper returns the number of bytes it consumed (or a
	 * negative error), so we walk the frame in the order the
	 * attributes appear in a D1 message.
	 */
	used = sizeof(*d1);
	result = wlp_get_uuid_e(wlp, ptr + used, uuid_e, len - used);
	if (result < 0) {
		dev_err(dev, "WLP: unable to obtain UUID-E attribute from D1 "
			"message.\n");
		goto error_parse;
	}
	used += result;
	result = wlp_get_wss_sel_mthd(wlp, ptr + used, sel_mthd, len - used);
	if (result < 0) {
		dev_err(dev, "WLP: unable to obtain WSS selection method "
			"from D1 message.\n");
		goto error_parse;
	}
	used += result;
	result = wlp_get_dev_name(wlp, ptr + used, dev_info->name,
				  len - used);
	if (result < 0) {
		dev_err(dev, "WLP: unable to obtain Device Name from D1 "
			"message.\n");
		goto error_parse;
	}
	used += result;
	/* Optional device-info attributes; the helper stops at the first
	 * attribute that is not variable device information. */
	result = wlp_get_variable_info(wlp, ptr + used, dev_info, len - used);
	if (result < 0) {
		dev_err(dev, "WLP: unable to obtain Device Information from "
			"D1 message.\n");
		goto error_parse;
	}
	used += result;
	result = wlp_get_wlp_assc_err(wlp, ptr + used, assc_err, len - used);
	if (result < 0) {
		dev_err(dev, "WLP: unable to obtain WLP Association Error "
			"Information from D1 message.\n");
		goto error_parse;
	}
	result = 0;	/* success: do not leak the last byte count */
error_parse:
	return result;
}
1220/**
1221 * Handle incoming D1 frame
1222 *
1223 * The frame has already been verified to contain an Association header with
1224 * the correct version number. Parse the incoming frame, construct and send
1225 * a D2 frame in response.
1226 *
1227 * It is not clear what to do with most fields in the incoming D1 frame. We
1228 * retrieve and discard the information here for now.
1229 */
1230void wlp_handle_d1_frame(struct work_struct *ws)
1231{
1232 struct wlp_assoc_frame_ctx *frame_ctx = container_of(ws,
1233 struct wlp_assoc_frame_ctx,
1234 ws);
1235 struct wlp *wlp = frame_ctx->wlp;
1236 struct wlp_wss *wss = &wlp->wss;
1237 struct sk_buff *skb = frame_ctx->skb;
1238 struct uwb_dev_addr *src = &frame_ctx->src;
1239 int result;
1240 struct device *dev = &wlp->rc->uwb_dev.dev;
1241 struct wlp_uuid uuid_e;
1242 enum wlp_wss_sel_mthd sel_mthd = 0;
1243 struct wlp_device_info dev_info;
1244 enum wlp_assc_error assc_err;
1245 char uuid[WLP_WSS_UUID_STRSIZE];
1246 struct sk_buff *resp = NULL;
1247
1248 /* Parse D1 frame */
1249 d_fnstart(6, dev, "WLP: handle D1 frame. wlp = %p, skb = %p\n",
1250 wlp, skb);
1251 mutex_lock(&wss->mutex);
1252 mutex_lock(&wlp->mutex); /* to access wlp->uuid */
1253 memset(&dev_info, 0, sizeof(dev_info));
1254 result = wlp_parse_d1_frame(wlp, skb, &uuid_e, &sel_mthd, &dev_info,
1255 &assc_err);
1256 if (result < 0) {
1257 dev_err(dev, "WLP: Unable to parse incoming D1 frame.\n");
1258 kfree_skb(skb);
1259 goto out;
1260 }
1261 wlp_wss_uuid_print(uuid, sizeof(uuid), &uuid_e);
1262 d_printf(6, dev, "From D1 frame:\n"
1263 "UUID-E: %s\n"
1264 "Selection method: %d\n"
1265 "Device name (%d bytes): %s\n"
1266 "Model name (%d bytes): %s\n"
1267 "Manufacturer (%d bytes): %s\n"
1268 "Model number (%d bytes): %s\n"
1269 "Serial number (%d bytes): %s\n"
1270 "Primary device type: \n"
1271 " Category: %d \n"
1272 " OUI: %02x:%02x:%02x \n"
1273 " OUI Subdivision: %u \n",
1274 uuid, sel_mthd,
1275 (int)strlen(dev_info.name), dev_info.name,
1276 (int)strlen(dev_info.model_name), dev_info.model_name,
1277 (int)strlen(dev_info.manufacturer), dev_info.manufacturer,
1278 (int)strlen(dev_info.model_nr), dev_info.model_nr,
1279 (int)strlen(dev_info.serial), dev_info.serial,
1280 dev_info.prim_dev_type.category,
1281 dev_info.prim_dev_type.OUI[0],
1282 dev_info.prim_dev_type.OUI[1],
1283 dev_info.prim_dev_type.OUI[2],
1284 dev_info.prim_dev_type.OUIsubdiv);
1285
1286 kfree_skb(skb);
1287 if (!wlp_uuid_is_set(&wlp->uuid)) {
1288 dev_err(dev, "WLP: UUID is not set. Set via sysfs to "
1289 "proceed. Respong to D1 message with error F0.\n");
1290 result = wlp_build_assoc_f0(wlp, &resp,
1291 WLP_ASSOC_ERROR_NOT_READY);
1292 if (result < 0) {
1293 dev_err(dev, "WLP: Unable to construct F0 message.\n");
1294 goto out;
1295 }
1296 } else {
1297 /* Construct D2 frame */
1298 result = wlp_build_assoc_d2(wlp, wss, &resp, &uuid_e);
1299 if (result < 0) {
1300 dev_err(dev, "WLP: Unable to construct D2 message.\n");
1301 goto out;
1302 }
1303 }
1304 /* Send D2 frame */
1305 BUG_ON(wlp->xmit_frame == NULL);
1306 result = wlp->xmit_frame(wlp, resp, src);
1307 if (result < 0) {
1308 dev_err(dev, "WLP: Unable to transmit D2 association "
1309 "message: %d\n", result);
1310 if (result == -ENXIO)
1311 dev_err(dev, "WLP: Is network interface up? \n");
1312 /* We could try again ... */
1313 dev_kfree_skb_any(resp); /* we need to free if tx fails */
1314 }
1315out:
1316 kfree(frame_ctx);
1317 mutex_unlock(&wlp->mutex);
1318 mutex_unlock(&wss->mutex);
1319 d_fnend(6, dev, "WLP: handle D1 frame. wlp = %p\n", wlp);
1320}
1321
1322/**
1323 * Parse incoming D2 frame, create and populate temporary cache
1324 *
1325 * @skb: socket buffer in which D2 frame can be found
1326 * @neighbor: the neighbor that sent the D2 frame
1327 *
1328 * Will allocate memory for temporary storage of information learned during
1329 * discovery.
1330 */
1331int wlp_parse_d2_frame_to_cache(struct wlp *wlp, struct sk_buff *skb,
1332 struct wlp_neighbor_e *neighbor)
1333{
1334 struct device *dev = &wlp->rc->uwb_dev.dev;
1335 struct wlp_frame_assoc *d2 = (void *) skb->data;
1336 void *ptr = skb->data;
1337 size_t len = skb->len;
1338 size_t used;
1339 ssize_t result;
1340 struct wlp_uuid uuid_e;
1341 struct wlp_device_info *nb_info;
1342 enum wlp_assc_error assc_err;
1343
1344 used = sizeof(*d2);
1345 result = wlp_get_uuid_e(wlp, ptr + used, &uuid_e, len - used);
1346 if (result < 0) {
1347 dev_err(dev, "WLP: unable to obtain UUID-E attribute from D2 "
1348 "message.\n");
1349 goto error_parse;
1350 }
1351 if (memcmp(&uuid_e, &wlp->uuid, sizeof(uuid_e))) {
1352 dev_err(dev, "WLP: UUID-E in incoming D2 does not match "
1353 "local UUID sent in D1. \n");
1354 goto error_parse;
1355 }
1356 used += result;
1357 result = wlp_get_uuid_r(wlp, ptr + used, &neighbor->uuid, len - used);
1358 if (result < 0) {
1359 dev_err(dev, "WLP: unable to obtain UUID-R attribute from D2 "
1360 "message.\n");
1361 goto error_parse;
1362 }
1363 used += result;
1364 result = wlp_get_wss_info_to_cache(wlp, ptr + used, neighbor,
1365 len - used);
1366 if (result < 0) {
1367 dev_err(dev, "WLP: unable to obtain WSS information "
1368 "from D2 message.\n");
1369 goto error_parse;
1370 }
1371 used += result;
1372 neighbor->info = kzalloc(sizeof(struct wlp_device_info), GFP_KERNEL);
1373 if (neighbor->info == NULL) {
1374 dev_err(dev, "WLP: cannot allocate memory to store device "
1375 "info.\n");
1376 result = -ENOMEM;
1377 goto error_parse;
1378 }
1379 nb_info = neighbor->info;
1380 result = wlp_get_dev_name(wlp, ptr + used, nb_info->name,
1381 len - used);
1382 if (result < 0) {
1383 dev_err(dev, "WLP: unable to obtain Device Name from D2 "
1384 "message.\n");
1385 goto error_parse;
1386 }
1387 used += result;
1388 result = wlp_get_variable_info(wlp, ptr + used, nb_info, len - used);
1389 if (result < 0) {
1390 dev_err(dev, "WLP: unable to obtain Device Information from "
1391 "D2 message.\n");
1392 goto error_parse;
1393 }
1394 used += result;
1395 result = wlp_get_wlp_assc_err(wlp, ptr + used, &assc_err, len - used);
1396 if (result < 0) {
1397 dev_err(dev, "WLP: unable to obtain WLP Association Error "
1398 "Information from D2 message.\n");
1399 goto error_parse;
1400 }
1401 if (assc_err != WLP_ASSOC_ERROR_NONE) {
1402 dev_err(dev, "WLP: neighbor device returned association "
1403 "error %d\n", assc_err);
1404 result = -EINVAL;
1405 goto error_parse;
1406 }
1407 result = 0;
1408error_parse:
1409 if (result < 0)
1410 wlp_remove_neighbor_tmp_info(neighbor);
1411 return result;
1412}
1413
1414/**
 * Parse incoming D2 frame, populate attribute values of the WSS being enrolled in
1416 *
1417 * @wss: our WSS that will be enrolled
1418 * @skb: socket buffer in which D2 frame can be found
1419 * @neighbor: the neighbor that sent the D2 frame
1420 * @wssid: the wssid of the WSS in which we want to enroll
1421 *
1422 * Forms part of enrollment sequence. We are trying to enroll in WSS with
1423 * @wssid by using @neighbor as registrar. A D1 message was sent to
1424 * @neighbor and now we need to parse the D2 response. The neighbor's
1425 * response is searched for the requested WSS and if found (and it accepts
1426 * enrollment), we store the information.
1427 */
1428int wlp_parse_d2_frame_to_enroll(struct wlp_wss *wss, struct sk_buff *skb,
1429 struct wlp_neighbor_e *neighbor,
1430 struct wlp_uuid *wssid)
1431{
1432 struct wlp *wlp = container_of(wss, struct wlp, wss);
1433 struct device *dev = &wlp->rc->uwb_dev.dev;
1434 void *ptr = skb->data;
1435 size_t len = skb->len;
1436 size_t used;
1437 ssize_t result;
1438 struct wlp_uuid uuid_e;
1439 struct wlp_uuid uuid_r;
1440 struct wlp_device_info nb_info;
1441 enum wlp_assc_error assc_err;
1442 char uuid_bufA[WLP_WSS_UUID_STRSIZE];
1443 char uuid_bufB[WLP_WSS_UUID_STRSIZE];
1444
1445 used = sizeof(struct wlp_frame_assoc);
1446 result = wlp_get_uuid_e(wlp, ptr + used, &uuid_e, len - used);
1447 if (result < 0) {
1448 dev_err(dev, "WLP: unable to obtain UUID-E attribute from D2 "
1449 "message.\n");
1450 goto error_parse;
1451 }
1452 if (memcmp(&uuid_e, &wlp->uuid, sizeof(uuid_e))) {
1453 dev_err(dev, "WLP: UUID-E in incoming D2 does not match "
1454 "local UUID sent in D1. \n");
1455 goto error_parse;
1456 }
1457 used += result;
1458 result = wlp_get_uuid_r(wlp, ptr + used, &uuid_r, len - used);
1459 if (result < 0) {
1460 dev_err(dev, "WLP: unable to obtain UUID-R attribute from D2 "
1461 "message.\n");
1462 goto error_parse;
1463 }
1464 if (memcmp(&uuid_r, &neighbor->uuid, sizeof(uuid_r))) {
1465 wlp_wss_uuid_print(uuid_bufA, sizeof(uuid_bufA),
1466 &neighbor->uuid);
1467 wlp_wss_uuid_print(uuid_bufB, sizeof(uuid_bufB), &uuid_r);
1468 dev_err(dev, "WLP: UUID of neighbor does not match UUID "
1469 "learned during discovery. Originally discovered: %s, "
1470 "now from D2 message: %s\n", uuid_bufA, uuid_bufB);
1471 result = -EINVAL;
1472 goto error_parse;
1473 }
1474 used += result;
1475 wss->wssid = *wssid;
1476 result = wlp_get_wss_info_to_enroll(wlp, ptr + used, wss, len - used);
1477 if (result < 0) {
1478 dev_err(dev, "WLP: unable to obtain WSS information "
1479 "from D2 message.\n");
1480 goto error_parse;
1481 }
1482 if (wss->state != WLP_WSS_STATE_PART_ENROLLED) {
1483 dev_err(dev, "WLP: D2 message did not contain information "
1484 "for successful enrollment. \n");
1485 result = -EINVAL;
1486 goto error_parse;
1487 }
1488 used += result;
1489 /* Place device information on stack to continue parsing of message */
1490 result = wlp_get_dev_name(wlp, ptr + used, nb_info.name,
1491 len - used);
1492 if (result < 0) {
1493 dev_err(dev, "WLP: unable to obtain Device Name from D2 "
1494 "message.\n");
1495 goto error_parse;
1496 }
1497 used += result;
1498 result = wlp_get_variable_info(wlp, ptr + used, &nb_info, len - used);
1499 if (result < 0) {
1500 dev_err(dev, "WLP: unable to obtain Device Information from "
1501 "D2 message.\n");
1502 goto error_parse;
1503 }
1504 used += result;
1505 result = wlp_get_wlp_assc_err(wlp, ptr + used, &assc_err, len - used);
1506 if (result < 0) {
1507 dev_err(dev, "WLP: unable to obtain WLP Association Error "
1508 "Information from D2 message.\n");
1509 goto error_parse;
1510 }
1511 if (assc_err != WLP_ASSOC_ERROR_NONE) {
1512 dev_err(dev, "WLP: neighbor device returned association "
1513 "error %d\n", assc_err);
1514 if (wss->state == WLP_WSS_STATE_PART_ENROLLED) {
1515 dev_err(dev, "WLP: Enrolled in WSS (should not "
1516 "happen according to spec). Undoing. \n");
1517 wlp_wss_reset(wss);
1518 }
1519 result = -EINVAL;
1520 goto error_parse;
1521 }
1522 result = 0;
1523error_parse:
1524 return result;
1525}
1526
1527/**
1528 * Parse C3/C4 frame into provided variables
1529 *
1530 * @wssid: will point to copy of wssid retrieved from C3/C4 frame
1531 * @tag: will point to copy of tag retrieved from C3/C4 frame
1532 * @virt_addr: will point to copy of virtual address retrieved from C3/C4
1533 * frame.
1534 *
1535 * Calling function has to allocate memory for these values.
1536 *
1537 * skb contains a valid C3/C4 frame, return the individual fields of this
1538 * frame in the provided variables.
1539 */
1540int wlp_parse_c3c4_frame(struct wlp *wlp, struct sk_buff *skb,
1541 struct wlp_uuid *wssid, u8 *tag,
1542 struct uwb_mac_addr *virt_addr)
1543{
1544 struct device *dev = &wlp->rc->uwb_dev.dev;
1545 int result;
1546 void *ptr = skb->data;
1547 size_t len = skb->len;
1548 size_t used;
1549 char buf[WLP_WSS_UUID_STRSIZE];
1550 struct wlp_frame_assoc *assoc = ptr;
1551
1552 d_fnstart(6, dev, "wlp %p, skb %p \n", wlp, skb);
1553 used = sizeof(*assoc);
1554 result = wlp_get_wssid(wlp, ptr + used, wssid, len - used);
1555 if (result < 0) {
1556 dev_err(dev, "WLP: unable to obtain WSSID attribute from "
1557 "%s message.\n", wlp_assoc_frame_str(assoc->type));
1558 goto error_parse;
1559 }
1560 used += result;
1561 result = wlp_get_wss_tag(wlp, ptr + used, tag, len - used);
1562 if (result < 0) {
1563 dev_err(dev, "WLP: unable to obtain WSS tag attribute from "
1564 "%s message.\n", wlp_assoc_frame_str(assoc->type));
1565 goto error_parse;
1566 }
1567 used += result;
1568 result = wlp_get_wss_virt(wlp, ptr + used, virt_addr, len - used);
1569 if (result < 0) {
1570 dev_err(dev, "WLP: unable to obtain WSS virtual address "
1571 "attribute from %s message.\n",
1572 wlp_assoc_frame_str(assoc->type));
1573 goto error_parse;
1574 }
1575 wlp_wss_uuid_print(buf, sizeof(buf), wssid);
1576 d_printf(6, dev, "WLP: parsed: WSSID %s, tag 0x%02x, virt "
1577 "%02x:%02x:%02x:%02x:%02x:%02x \n", buf, *tag,
1578 virt_addr->data[0], virt_addr->data[1], virt_addr->data[2],
1579 virt_addr->data[3], virt_addr->data[4], virt_addr->data[5]);
1580
1581error_parse:
1582 d_fnend(6, dev, "wlp %p, skb %p, result = %d \n", wlp, skb, result);
1583 return result;
1584}
1585
1586/**
1587 * Allocate memory for and populate fields of C1 or C2 association frame
1588 *
1589 * The C1 and C2 association frames appear identical - except for the type.
1590 */
1591static
1592int wlp_build_assoc_c1c2(struct wlp *wlp, struct wlp_wss *wss,
1593 struct sk_buff **skb, enum wlp_assoc_type type)
1594{
1595 struct device *dev = &wlp->rc->uwb_dev.dev;
1596 int result = -ENOMEM;
1597 struct {
1598 struct wlp_frame_assoc c_hdr;
1599 struct wlp_attr_wssid wssid;
1600 } *c;
1601 struct sk_buff *_skb;
1602
1603 d_fnstart(6, dev, "wlp %p, wss %p \n", wlp, wss);
1604 _skb = dev_alloc_skb(sizeof(*c));
1605 if (_skb == NULL) {
1606 dev_err(dev, "WLP: Unable to allocate memory for C1/C2 "
1607 "association frame. \n");
1608 goto error_alloc;
1609 }
1610 c = (void *) _skb->data;
1611 d_printf(6, dev, "C1/C2 starts at %p \n", c);
1612 c->c_hdr.hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID);
1613 c->c_hdr.hdr.type = WLP_FRAME_ASSOCIATION;
1614 c->c_hdr.type = type;
1615 wlp_set_version(&c->c_hdr.version, WLP_VERSION);
1616 wlp_set_msg_type(&c->c_hdr.msg_type, type);
1617 wlp_set_wssid(&c->wssid, &wss->wssid);
1618 skb_put(_skb, sizeof(*c));
1619 d_printf(6, dev, "C1/C2 message:\n");
1620 d_dump(6, dev, c, sizeof(*c));
1621 *skb = _skb;
1622 result = 0;
1623error_alloc:
1624 d_fnend(6, dev, "wlp %p, wss %p, result %d \n", wlp, wss, result);
1625 return result;
1626}
1627
1628
/* Convenience wrapper: build a C1 association frame for @wss. */
static
int wlp_build_assoc_c1(struct wlp *wlp, struct wlp_wss *wss,
		       struct sk_buff **skb)
{
	return wlp_build_assoc_c1c2(wlp, wss, skb, WLP_ASSOC_C1);
}
1635
/* Convenience wrapper: build a C2 association frame for @wss. */
static
int wlp_build_assoc_c2(struct wlp *wlp, struct wlp_wss *wss,
		       struct sk_buff **skb)
{
	return wlp_build_assoc_c1c2(wlp, wss, skb, WLP_ASSOC_C2);
}
1642
1643
1644/**
1645 * Allocate memory for and populate fields of C3 or C4 association frame
1646 *
1647 * The C3 and C4 association frames appear identical - except for the type.
1648 */
1649static
1650int wlp_build_assoc_c3c4(struct wlp *wlp, struct wlp_wss *wss,
1651 struct sk_buff **skb, enum wlp_assoc_type type)
1652{
1653 struct device *dev = &wlp->rc->uwb_dev.dev;
1654 int result = -ENOMEM;
1655 struct {
1656 struct wlp_frame_assoc c_hdr;
1657 struct wlp_attr_wssid wssid;
1658 struct wlp_attr_wss_tag wss_tag;
1659 struct wlp_attr_wss_virt wss_virt;
1660 } *c;
1661 struct sk_buff *_skb;
1662
1663 d_fnstart(6, dev, "wlp %p, wss %p \n", wlp, wss);
1664 _skb = dev_alloc_skb(sizeof(*c));
1665 if (_skb == NULL) {
1666 dev_err(dev, "WLP: Unable to allocate memory for C3/C4 "
1667 "association frame. \n");
1668 goto error_alloc;
1669 }
1670 c = (void *) _skb->data;
1671 d_printf(6, dev, "C3/C4 starts at %p \n", c);
1672 c->c_hdr.hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID);
1673 c->c_hdr.hdr.type = WLP_FRAME_ASSOCIATION;
1674 c->c_hdr.type = type;
1675 wlp_set_version(&c->c_hdr.version, WLP_VERSION);
1676 wlp_set_msg_type(&c->c_hdr.msg_type, type);
1677 wlp_set_wssid(&c->wssid, &wss->wssid);
1678 wlp_set_wss_tag(&c->wss_tag, wss->tag);
1679 wlp_set_wss_virt(&c->wss_virt, &wss->virtual_addr);
1680 skb_put(_skb, sizeof(*c));
1681 d_printf(6, dev, "C3/C4 message:\n");
1682 d_dump(6, dev, c, sizeof(*c));
1683 *skb = _skb;
1684 result = 0;
1685error_alloc:
1686 d_fnend(6, dev, "wlp %p, wss %p, result %d \n", wlp, wss, result);
1687 return result;
1688}
1689
/* Convenience wrapper: build a C3 association frame for @wss. */
static
int wlp_build_assoc_c3(struct wlp *wlp, struct wlp_wss *wss,
		       struct sk_buff **skb)
{
	return wlp_build_assoc_c3c4(wlp, wss, skb, WLP_ASSOC_C3);
}
1696
/* Convenience wrapper: build a C4 association frame for @wss. */
static
int wlp_build_assoc_c4(struct wlp *wlp, struct wlp_wss *wss,
		       struct sk_buff **skb)
{
	return wlp_build_assoc_c3c4(wlp, wss, skb, WLP_ASSOC_C4);
}
1703
1704
/*
 * Generate wlp_send_assoc_<type>(): build a <type> association frame
 * with wlp_build_assoc_<type>() and transmit it to @dev_addr via
 * wlp->xmit_frame.  On transmit failure the skb is freed here, so the
 * caller only sees the negative error code.
 */
#define wlp_send_assoc(type, id) \
static int wlp_send_assoc_##type(struct wlp *wlp, struct wlp_wss *wss, \
				 struct uwb_dev_addr *dev_addr) \
{ \
	struct device *dev = &wlp->rc->uwb_dev.dev; \
	int result; \
	struct sk_buff *skb = NULL; \
	d_fnstart(6, dev, "wlp %p, wss %p, neighbor: %02x:%02x\n", \
		  wlp, wss, dev_addr->data[1], dev_addr->data[0]); \
	d_printf(6, dev, "WLP: Constructing %s frame. \n", \
		 wlp_assoc_frame_str(id)); \
	/* Build the frame */ \
	result = wlp_build_assoc_##type(wlp, wss, &skb); \
	if (result < 0) { \
		dev_err(dev, "WLP: Unable to construct %s association " \
			"frame: %d\n", wlp_assoc_frame_str(id), result);\
		goto error_build_assoc; \
	} \
	/* Send the frame */ \
	d_printf(6, dev, "Transmitting %s frame to %02x:%02x \n", \
		 wlp_assoc_frame_str(id), \
		 dev_addr->data[1], dev_addr->data[0]); \
	BUG_ON(wlp->xmit_frame == NULL); \
	result = wlp->xmit_frame(wlp, skb, dev_addr); \
	if (result < 0) { \
		dev_err(dev, "WLP: Unable to transmit %s association " \
			"message: %d\n", wlp_assoc_frame_str(id), \
			result); \
		if (result == -ENXIO) \
			dev_err(dev, "WLP: Is network interface " \
				"up? \n"); \
		goto error_xmit; \
	} \
	return 0; \
error_xmit: \
	/* We could try again ... */ \
	dev_kfree_skb_any(skb);/*we need to free if tx fails*/ \
error_build_assoc: \
	d_fnend(6, dev, "wlp %p, wss %p, neighbor: %02x:%02x\n", \
		wlp, wss, dev_addr->data[1], dev_addr->data[0]); \
	return result; \
}

/* Instantiate the senders dispatched by wlp_send_assoc_frame(). */
wlp_send_assoc(d1, WLP_ASSOC_D1)
wlp_send_assoc(c1, WLP_ASSOC_C1)
wlp_send_assoc(c3, WLP_ASSOC_C3)
1751
1752int wlp_send_assoc_frame(struct wlp *wlp, struct wlp_wss *wss,
1753 struct uwb_dev_addr *dev_addr,
1754 enum wlp_assoc_type type)
1755{
1756 int result = 0;
1757 struct device *dev = &wlp->rc->uwb_dev.dev;
1758 switch (type) {
1759 case WLP_ASSOC_D1:
1760 result = wlp_send_assoc_d1(wlp, wss, dev_addr);
1761 break;
1762 case WLP_ASSOC_C1:
1763 result = wlp_send_assoc_c1(wlp, wss, dev_addr);
1764 break;
1765 case WLP_ASSOC_C3:
1766 result = wlp_send_assoc_c3(wlp, wss, dev_addr);
1767 break;
1768 default:
1769 dev_err(dev, "WLP: Received request to send unknown "
1770 "association message.\n");
1771 result = -EINVAL;
1772 break;
1773 }
1774 return result;
1775}
1776
1777/**
1778 * Handle incoming C1 frame
1779 *
1780 * The frame has already been verified to contain an Association header with
1781 * the correct version number. Parse the incoming frame, construct and send
1782 * a C2 frame in response.
1783 */
void wlp_handle_c1_frame(struct work_struct *ws)
{
	struct wlp_assoc_frame_ctx *frame_ctx = container_of(ws,
						  struct wlp_assoc_frame_ctx,
						  ws);
	struct wlp *wlp = frame_ctx->wlp;
	struct wlp_wss *wss = &wlp->wss;
	struct device *dev = &wlp->rc->uwb_dev.dev;
	struct wlp_frame_assoc *c1 = (void *) frame_ctx->skb->data;
	unsigned int len = frame_ctx->skb->len;
	struct uwb_dev_addr *src = &frame_ctx->src;
	int result;
	struct wlp_uuid wssid;
	char buf[WLP_WSS_UUID_STRSIZE];
	struct sk_buff *resp = NULL;

	/* Parse C1 frame */
	d_fnstart(6, dev, "WLP: handle C1 frame. wlp = %p, c1 = %p\n",
		  wlp, c1);
	mutex_lock(&wss->mutex);
	/* The WSSID attribute immediately follows the association header. */
	result = wlp_get_wssid(wlp, (void *)c1 + sizeof(*c1), &wssid,
			       len - sizeof(*c1));
	if (result < 0) {
		dev_err(dev, "WLP: unable to obtain WSSID from C1 frame.\n");
		goto out;
	}
	wlp_wss_uuid_print(buf, sizeof(buf), &wssid);
	d_printf(6, dev, "Received C1 frame with WSSID %s \n", buf);
	/* Answer with C2 only when the WSSID matches our active WSS;
	 * otherwise report error F0/INV to the sender. */
	if (!memcmp(&wssid, &wss->wssid, sizeof(wssid))
	    && wss->state == WLP_WSS_STATE_ACTIVE) {
		d_printf(6, dev, "WSSID from C1 frame is known locally "
			 "and is active\n");
		/* Construct C2 frame */
		result = wlp_build_assoc_c2(wlp, wss, &resp);
		if (result < 0) {
			dev_err(dev, "WLP: Unable to construct C2 message.\n");
			goto out;
		}
	} else {
		d_printf(6, dev, "WSSID from C1 frame is not known locally "
			 "or is not active\n");
		/* Construct F0 frame */
		result = wlp_build_assoc_f0(wlp, &resp, WLP_ASSOC_ERROR_INV);
		if (result < 0) {
			dev_err(dev, "WLP: Unable to construct F0 message.\n");
			goto out;
		}
	}
	/* Send C2 frame */
	d_printf(6, dev, "Transmitting response (C2/F0) frame to %02x:%02x \n",
		 src->data[1], src->data[0]);
	BUG_ON(wlp->xmit_frame == NULL);
	result = wlp->xmit_frame(wlp, resp, src);
	if (result < 0) {
		dev_err(dev, "WLP: Unable to transmit response association "
			"message: %d\n", result);
		if (result == -ENXIO)
			dev_err(dev, "WLP: Is network interface up? \n");
		/* We could try again ... */
		dev_kfree_skb_any(resp); /* we need to free if tx fails */
	}
out:
	/* The incoming skb and the work context are consumed here on
	 * every path. */
	kfree_skb(frame_ctx->skb);
	kfree(frame_ctx);
	mutex_unlock(&wss->mutex);
	d_fnend(6, dev, "WLP: handle C1 frame. wlp = %p\n", wlp);
}
1851
1852/**
1853 * Handle incoming C3 frame
1854 *
1855 * The frame has already been verified to contain an Association header with
1856 * the correct version number. Parse the incoming frame, construct and send
1857 * a C4 frame in response. If the C3 frame identifies a WSS that is locally
1858 * active then we connect to this neighbor (add it to our EDA cache).
1859 */
void wlp_handle_c3_frame(struct work_struct *ws)
{
	struct wlp_assoc_frame_ctx *frame_ctx = container_of(ws,
						  struct wlp_assoc_frame_ctx,
						  ws);
	struct wlp *wlp = frame_ctx->wlp;
	struct wlp_wss *wss = &wlp->wss;
	struct device *dev = &wlp->rc->uwb_dev.dev;
	struct sk_buff *skb = frame_ctx->skb;
	struct uwb_dev_addr *src = &frame_ctx->src;
	int result;
	char buf[WLP_WSS_UUID_STRSIZE];
	struct sk_buff *resp = NULL;
	struct wlp_uuid wssid;
	u8 tag;
	struct uwb_mac_addr virt_addr;

	/* Parse C3 frame */
	d_fnstart(6, dev, "WLP: handle C3 frame. wlp = %p, skb = %p\n",
		  wlp, skb);
	mutex_lock(&wss->mutex);
	result = wlp_parse_c3c4_frame(wlp, skb, &wssid, &tag, &virt_addr);
	if (result < 0) {
		dev_err(dev, "WLP: unable to obtain values from C3 frame.\n");
		goto out;
	}
	wlp_wss_uuid_print(buf, sizeof(buf), &wssid);
	d_printf(6, dev, "Received C3 frame with WSSID %s \n", buf);
	/* Connect (record neighbor in the EDA cache and answer C4) only
	 * when the WSSID matches our WSS and it is at least active;
	 * otherwise answer with error F0. */
	if (!memcmp(&wssid, &wss->wssid, sizeof(wssid))
	    && wss->state >= WLP_WSS_STATE_ACTIVE) {
		d_printf(6, dev, "WSSID from C3 frame is known locally "
			 "and is active\n");
		result = wlp_eda_update_node(&wlp->eda, src, wss,
					     (void *) virt_addr.data, tag,
					     WLP_WSS_CONNECTED);
		if (result < 0) {
			/* Could not record the neighbor: report internal
			 * error (F0/INT) instead of C4. */
			dev_err(dev, "WLP: Unable to update EDA cache "
				"with new connected neighbor information.\n");
			result = wlp_build_assoc_f0(wlp, &resp,
						    WLP_ASSOC_ERROR_INT);
			if (result < 0) {
				dev_err(dev, "WLP: Unable to construct F0 "
					"message.\n");
				goto out;
			}
		} else {
			wss->state = WLP_WSS_STATE_CONNECTED;
			/* Construct C4 frame */
			result = wlp_build_assoc_c4(wlp, wss, &resp);
			if (result < 0) {
				dev_err(dev, "WLP: Unable to construct C4 "
					"message.\n");
				goto out;
			}
		}
	} else {
		d_printf(6, dev, "WSSID from C3 frame is not known locally "
			 "or is not active\n");
		/* Construct F0 frame */
		result = wlp_build_assoc_f0(wlp, &resp, WLP_ASSOC_ERROR_INV);
		if (result < 0) {
			dev_err(dev, "WLP: Unable to construct F0 message.\n");
			goto out;
		}
	}
	/* Send C4 frame */
	d_printf(6, dev, "Transmitting response (C4/F0) frame to %02x:%02x \n",
		 src->data[1], src->data[0]);
	BUG_ON(wlp->xmit_frame == NULL);
	result = wlp->xmit_frame(wlp, resp, src);
	if (result < 0) {
		dev_err(dev, "WLP: Unable to transmit response association "
			"message: %d\n", result);
		if (result == -ENXIO)
			dev_err(dev, "WLP: Is network interface up? \n");
		/* We could try again ... */
		dev_kfree_skb_any(resp); /* we need to free if tx fails */
	}
out:
	/* The incoming skb and the work context are consumed here on
	 * every path. */
	kfree_skb(frame_ctx->skb);
	kfree(frame_ctx);
	mutex_unlock(&wss->mutex);
	d_fnend(6, dev, "WLP: handle C3 frame. wlp = %p, skb = %p\n",
		wlp, skb);
}
1945
1946
diff --git a/drivers/uwb/wlp/sysfs.c b/drivers/uwb/wlp/sysfs.c
new file mode 100644
index 000000000000..1bb9b1f97d47
--- /dev/null
+++ b/drivers/uwb/wlp/sysfs.c
@@ -0,0 +1,709 @@
1/*
2 * WiMedia Logical Link Control Protocol (WLP)
3 * sysfs functions
4 *
5 * Copyright (C) 2007 Intel Corporation
6 * Reinette Chatre <reinette.chatre@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * FIXME: Docs
24 *
25 */
26
27#include <linux/wlp.h>
28#include "wlp-internal.h"
29
30static
31size_t wlp_wss_wssid_e_print(char *buf, size_t bufsize,
32 struct wlp_wssid_e *wssid_e)
33{
34 size_t used = 0;
35 used += scnprintf(buf, bufsize, " WSS: ");
36 used += wlp_wss_uuid_print(buf + used, bufsize - used,
37 &wssid_e->wssid);
38
39 if (wssid_e->info != NULL) {
40 used += scnprintf(buf + used, bufsize - used, " ");
41 used += uwb_mac_addr_print(buf + used, bufsize - used,
42 &wssid_e->info->bcast);
43 used += scnprintf(buf + used, bufsize - used, " %u %u %s\n",
44 wssid_e->info->accept_enroll,
45 wssid_e->info->sec_status,
46 wssid_e->info->name);
47 }
48 return used;
49}
50
51/**
52 * Print out information learned from neighbor discovery
53 *
54 * Some fields being printed may not be included in the device discovery
55 * information (it is not mandatory). We are thus careful how the
56 * information is printed to ensure it is clear to the user what field is
57 * being referenced.
58 * The information being printed is for one time use - temporary storage is
59 * cleaned after it is printed.
60 *
61 * Ideally sysfs output should be on one line. The information printed here
62 * contain a few strings so it will be hard to parse if they are all
63 * printed on the same line - without agreeing on a standard field
64 * separator.
65 */
66static
67ssize_t wlp_wss_neighborhood_print_remove(struct wlp *wlp, char *buf,
68 size_t bufsize)
69{
70 size_t used = 0;
71 struct wlp_neighbor_e *neighb;
72 struct wlp_wssid_e *wssid_e;
73
74 mutex_lock(&wlp->nbmutex);
75 used = scnprintf(buf, bufsize, "#Neighbor information\n"
76 "#uuid dev_addr\n"
77 "# Device Name:\n# Model Name:\n# Manufacturer:\n"
78 "# Model Nr:\n# Serial:\n"
79 "# Pri Dev type: CategoryID OUI OUISubdiv "
80 "SubcategoryID\n"
81 "# WSS: WSSID WSS_name accept_enroll sec_status "
82 "bcast\n"
83 "# WSS: WSSID WSS_name accept_enroll sec_status "
84 "bcast\n\n");
85 list_for_each_entry(neighb, &wlp->neighbors, node) {
86 if (bufsize - used <= 0)
87 goto out;
88 used += wlp_wss_uuid_print(buf + used, bufsize - used,
89 &neighb->uuid);
90 buf[used++] = ' ';
91 used += uwb_dev_addr_print(buf + used, bufsize - used,
92 &neighb->uwb_dev->dev_addr);
93 if (neighb->info != NULL)
94 used += scnprintf(buf + used, bufsize - used,
95 "\n Device Name: %s\n"
96 " Model Name: %s\n"
97 " Manufacturer:%s \n"
98 " Model Nr: %s\n"
99 " Serial: %s\n"
100 " Pri Dev type: "
101 "%u %02x:%02x:%02x %u %u\n",
102 neighb->info->name,
103 neighb->info->model_name,
104 neighb->info->manufacturer,
105 neighb->info->model_nr,
106 neighb->info->serial,
107 neighb->info->prim_dev_type.category,
108 neighb->info->prim_dev_type.OUI[0],
109 neighb->info->prim_dev_type.OUI[1],
110 neighb->info->prim_dev_type.OUI[2],
111 neighb->info->prim_dev_type.OUIsubdiv,
112 neighb->info->prim_dev_type.subID);
113 list_for_each_entry(wssid_e, &neighb->wssid, node) {
114 used += wlp_wss_wssid_e_print(buf + used,
115 bufsize - used,
116 wssid_e);
117 }
118 buf[used++] = '\n';
119 wlp_remove_neighbor_tmp_info(neighb);
120 }
121
122
123out:
124 mutex_unlock(&wlp->nbmutex);
125 return used;
126}
127
128
/**
 * Show properties of all WSS in neighborhood.
 *
 * Will trigger a complete discovery of WSS activated by this device and
 * its neighbors (wlp_discover()), then print - and discard - the
 * temporary discovery results. Output is bounded to one sysfs page.
 */
ssize_t wlp_neighborhood_show(struct wlp *wlp, char *buf)
{
	wlp_discover(wlp);
	return wlp_wss_neighborhood_print_remove(wlp, buf, PAGE_SIZE);
}
EXPORT_SYMBOL_GPL(wlp_neighborhood_show);
141
/*
 * Print the static properties of @wss (WSSID, broadcast address, hash,
 * secure status, master key, local tag, local virtual EUI-48, name)
 * followed by a legend, into @buf. Returns bytes written.
 *
 * Caller must hold wss->mutex (see wlp_wss_activate_show() and
 * wlp_wss_properties_show()).
 */
static
ssize_t __wlp_wss_properties_show(struct wlp_wss *wss, char *buf,
				  size_t bufsize)
{
	ssize_t result;

	result = wlp_wss_uuid_print(buf, bufsize, &wss->wssid);
	result += scnprintf(buf + result, bufsize - result, " ");
	result += uwb_mac_addr_print(buf + result, bufsize - result,
				     &wss->bcast);
	result += scnprintf(buf + result, bufsize - result,
			    " 0x%02x %u ", wss->hash, wss->secure_status);
	result += wlp_wss_key_print(buf + result, bufsize - result,
				    wss->master_key);
	result += scnprintf(buf + result, bufsize - result, " 0x%02x ",
			    wss->tag);
	result += uwb_mac_addr_print(buf + result, bufsize - result,
				     &wss->virtual_addr);
	result += scnprintf(buf + result, bufsize - result, " %s", wss->name);
	/* Trailing legend documents the fields printed above, in order. */
	result += scnprintf(buf + result, bufsize - result,
			    "\n\n#WSSID\n#WSS broadcast address\n"
			    "#WSS hash\n#WSS secure status\n"
			    "#WSS master key\n#WSS local tag\n"
			    "#WSS local virtual EUI-48\n#WSS name\n");
	return result;
}
168
169/**
170 * Show which WSS is activated.
171 */
172ssize_t wlp_wss_activate_show(struct wlp_wss *wss, char *buf)
173{
174 int result = 0;
175
176 if (mutex_lock_interruptible(&wss->mutex))
177 goto out;
178 if (wss->state >= WLP_WSS_STATE_ACTIVE)
179 result = __wlp_wss_properties_show(wss, buf, PAGE_SIZE);
180 else
181 result = scnprintf(buf, PAGE_SIZE, "No local WSS active.\n");
182 result += scnprintf(buf + result, PAGE_SIZE - result,
183 "\n\n"
184 "# echo WSSID SECURE_STATUS ACCEPT_ENROLLMENT "
185 "NAME #create new WSS\n"
186 "# echo WSSID [DEV ADDR] #enroll in and activate "
187 "existing WSS, can request registrar\n"
188 "#\n"
189 "# WSSID is a 16 byte hex array. Eg. 12 A3 3B ... \n"
190 "# SECURE_STATUS 0 - unsecure, 1 - secure (default)\n"
191 "# ACCEPT_ENROLLMENT 0 - no, 1 - yes (default)\n"
192 "# NAME is the text string identifying the WSS\n"
193 "# DEV ADDR is the device address of neighbor "
194 "that should be registrar. Eg. 32:AB\n");
195
196 mutex_unlock(&wss->mutex);
197out:
198 return result;
199
200}
201EXPORT_SYMBOL_GPL(wlp_wss_activate_show);
202
/**
 * Create/activate a new WSS or enroll/activate in neighboring WSS
 *
 * The user can provide the WSSID of a WSS in which it wants to enroll.
 * Only the WSSID is necessary if the WSS have been discovered before. If
 * the WSS has not been discovered before, or the user wants to use a
 * particular neighbor as its registrar, then the user can also provide a
 * device address or the neighbor that will be used as registrar.
 *
 * A new WSS is created when the user provides a WSSID, secure status, and
 * WSS name.
 *
 * Parsing is done in two passes over the same input:
 *  - pass 1 tries "WSSID + DEV_ADDR"; a match count of 18 means a full
 *    device address was supplied -> enroll via that registrar;
 *  - a count of 16 or 17 (no complete device address) triggers pass 2,
 *    which tries "WSSID + SECURE_STATUS ACCEPT NAME": 16 -> enroll via
 *    broadcast, 19 -> create a new WSS.
 * Anything else is -EINVAL. Returns @size on success, negative errno on
 * failure.
 */
ssize_t wlp_wss_activate_store(struct wlp_wss *wss,
			       const char *buf, size_t size)
{
	ssize_t result = -EINVAL;
	struct wlp_uuid wssid;
	struct uwb_dev_addr dev;
	/* Broadcast registrar address used when no neighbor is given. */
	struct uwb_dev_addr bcast = {.data = {0xff, 0xff} };
	char name[65];	/* 64-byte WSS name + NUL (pre-zeroed below) */
	unsigned sec_status, accept;
	memset(name, 0, sizeof(name));
	result = sscanf(buf, "%02hhx %02hhx %02hhx %02hhx "
			"%02hhx %02hhx %02hhx %02hhx "
			"%02hhx %02hhx %02hhx %02hhx "
			"%02hhx %02hhx %02hhx %02hhx "
			"%02hhx:%02hhx",
			&wssid.data[0] , &wssid.data[1],
			&wssid.data[2] , &wssid.data[3],
			&wssid.data[4] , &wssid.data[5],
			&wssid.data[6] , &wssid.data[7],
			&wssid.data[8] , &wssid.data[9],
			&wssid.data[10], &wssid.data[11],
			&wssid.data[12], &wssid.data[13],
			&wssid.data[14], &wssid.data[15],
			&dev.data[1], &dev.data[0]);
	if (result == 16 || result == 17) {
		/* No complete device address - reparse as "create" form. */
		result = sscanf(buf, "%02hhx %02hhx %02hhx %02hhx "
				"%02hhx %02hhx %02hhx %02hhx "
				"%02hhx %02hhx %02hhx %02hhx "
				"%02hhx %02hhx %02hhx %02hhx "
				"%u %u %64c",
				&wssid.data[0] , &wssid.data[1],
				&wssid.data[2] , &wssid.data[3],
				&wssid.data[4] , &wssid.data[5],
				&wssid.data[6] , &wssid.data[7],
				&wssid.data[8] , &wssid.data[9],
				&wssid.data[10], &wssid.data[11],
				&wssid.data[12], &wssid.data[13],
				&wssid.data[14], &wssid.data[15],
				&sec_status, &accept, name);
		if (result == 16)
			result = wlp_wss_enroll_activate(wss, &wssid, &bcast);
		else if (result == 19) {
			/* Normalize user-supplied flags to strict 0/1. */
			sec_status = sec_status == 0 ? 0 : 1;
			accept = accept == 0 ? 0 : 1;
			/* We read name using %c, so the newline needs to be
			 * removed */
			if (strlen(name) != sizeof(name) - 1)
				name[strlen(name) - 1] = '\0';
			result = wlp_wss_create_activate(wss, &wssid, name,
							 sec_status, accept);
		} else
			result = -EINVAL;
	} else if (result == 18)
		result = wlp_wss_enroll_activate(wss, &wssid, &dev);
	else
		result = -EINVAL;
	return result < 0 ? result : size;
}
EXPORT_SYMBOL_GPL(wlp_wss_activate_store);
274
/**
 * Show the UUID of this host
 *
 * Prints the 16-byte UUID as space-separated hex octets followed by a
 * newline; wlp->mutex protects against a concurrent wlp_uuid_store().
 */
ssize_t wlp_uuid_show(struct wlp *wlp, char *buf)
{
	ssize_t result = 0;

	mutex_lock(&wlp->mutex);
	result = wlp_wss_uuid_print(buf, PAGE_SIZE, &wlp->uuid);
	buf[result++] = '\n';
	mutex_unlock(&wlp->mutex);
	return result;
}
EXPORT_SYMBOL_GPL(wlp_uuid_show);
289
/**
 * Store a new UUID for this host
 *
 * According to the spec this should be encoded as an octet string in the
 * order the octets are shown in string representation in RFC 4122 (WLP
 * 0.99 [Table 6])
 *
 * We do not check value provided by user.
 *
 * Expects 16 space-separated hex octets; anything that does not scan as
 * exactly 16 fields is rejected with -EINVAL and the current UUID is
 * left untouched (we parse into a local and only commit on success).
 * Returns @size on success.
 */
ssize_t wlp_uuid_store(struct wlp *wlp, const char *buf, size_t size)
{
	ssize_t result;
	struct wlp_uuid uuid;

	mutex_lock(&wlp->mutex);
	result = sscanf(buf, "%02hhx %02hhx %02hhx %02hhx "
			"%02hhx %02hhx %02hhx %02hhx "
			"%02hhx %02hhx %02hhx %02hhx "
			"%02hhx %02hhx %02hhx %02hhx ",
			&uuid.data[0] , &uuid.data[1],
			&uuid.data[2] , &uuid.data[3],
			&uuid.data[4] , &uuid.data[5],
			&uuid.data[6] , &uuid.data[7],
			&uuid.data[8] , &uuid.data[9],
			&uuid.data[10], &uuid.data[11],
			&uuid.data[12], &uuid.data[13],
			&uuid.data[14], &uuid.data[15]);
	if (result != 16) {
		result = -EINVAL;
		goto error;
	}
	wlp->uuid = uuid;
error:
	mutex_unlock(&wlp->mutex);
	return result < 0 ? result : size;
}
EXPORT_SYMBOL_GPL(wlp_uuid_store);
327
/**
 * Show contents of members of device information structure
 *
 * Generates wlp_dev_<member>_show() for each string member of
 * wlp->dev_info. The device info structure is lazily populated via
 * __wlp_setup_device_info() on first access; on setup failure the
 * negative error is returned directly.
 */
#define wlp_dev_info_show(type)						\
ssize_t wlp_dev_##type##_show(struct wlp *wlp, char *buf)		\
{									\
	ssize_t result = 0;						\
	mutex_lock(&wlp->mutex);					\
	if (wlp->dev_info == NULL) {					\
		result = __wlp_setup_device_info(wlp);			\
		if (result < 0)						\
			goto out;					\
	}								\
	result = scnprintf(buf, PAGE_SIZE, "%s\n", wlp->dev_info->type);\
out:									\
	mutex_unlock(&wlp->mutex);					\
	return result;							\
}									\
EXPORT_SYMBOL_GPL(wlp_dev_##type##_show);

wlp_dev_info_show(name)
wlp_dev_info_show(model_name)
wlp_dev_info_show(model_nr)
wlp_dev_info_show(manufacturer)
wlp_dev_info_show(serial)
353
/**
 * Store contents of members of device information structure
 *
 * Generates wlp_dev_<member>_store() for each string member of
 * wlp->dev_info, reading at most @len characters via a dynamically built
 * "%<len>c" scanf format. The member is zeroed first, so as long as
 * @len is smaller than the member size the stored value stays
 * NUL-terminated (%c itself does not terminate — TODO confirm member
 * sizes exceed the @len values used below).
 */
#define wlp_dev_info_store(type, len)					\
ssize_t wlp_dev_##type##_store(struct wlp *wlp, const char *buf, size_t size)\
{									\
	ssize_t result;							\
	char format[10];	/* holds "%<len>c" */			\
	mutex_lock(&wlp->mutex);					\
	if (wlp->dev_info == NULL) {					\
		result = __wlp_alloc_device_info(wlp);			\
		if (result < 0)						\
			goto out;					\
	}								\
	memset(wlp->dev_info->type, 0, sizeof(wlp->dev_info->type));	\
	sprintf(format, "%%%uc", len);					\
	result = sscanf(buf, format, wlp->dev_info->type);		\
out:									\
	mutex_unlock(&wlp->mutex);					\
	return result < 0 ? result : size;				\
}									\
EXPORT_SYMBOL_GPL(wlp_dev_##type##_store);

wlp_dev_info_store(name, 32)
wlp_dev_info_store(manufacturer, 64)
wlp_dev_info_store(model_name, 32)
wlp_dev_info_store(model_nr, 32)
wlp_dev_info_store(serial, 32)
382
/*
 * Human-readable names for the WLP primary device categories, indexed by
 * the WLP_DEV_CAT_* enum values. Entries between WLP_DEV_CAT_TELEPHONE
 * and WLP_DEV_CAT_OTHER may be unpopulated (NULL) - callers must range
 * check first (see wlp_dev_category_str()).
 */
static
const char *__wlp_dev_category[] = {
	[WLP_DEV_CAT_COMPUTER] = "Computer",
	[WLP_DEV_CAT_INPUT] = "Input device",
	[WLP_DEV_CAT_PRINT_SCAN_FAX_COPIER] = "Printer, scanner, FAX, or "
				   "Copier",
	[WLP_DEV_CAT_CAMERA] = "Camera",
	[WLP_DEV_CAT_STORAGE] = "Storage Network",
	[WLP_DEV_CAT_INFRASTRUCTURE] = "Infrastructure",
	[WLP_DEV_CAT_DISPLAY] = "Display",
	[WLP_DEV_CAT_MULTIM] = "Multimedia device",
	[WLP_DEV_CAT_GAMING] = "Gaming device",
	[WLP_DEV_CAT_TELEPHONE] = "Telephone",
	[WLP_DEV_CAT_OTHER] = "Other",
};
398
399static
400const char *wlp_dev_category_str(unsigned cat)
401{
402 if ((cat >= WLP_DEV_CAT_COMPUTER && cat <= WLP_DEV_CAT_TELEPHONE)
403 || cat == WLP_DEV_CAT_OTHER)
404 return __wlp_dev_category[cat];
405 return "unknown category";
406}
407
/* Show the primary device category as a human-readable string. */
ssize_t wlp_dev_prim_category_show(struct wlp *wlp, char *buf)
{
	ssize_t result = 0;
	mutex_lock(&wlp->mutex);
	/* Device info is created lazily on first access. */
	if (wlp->dev_info == NULL) {
		result = __wlp_setup_device_info(wlp);
		if (result < 0)
			goto out;
	}
	result = scnprintf(buf, PAGE_SIZE, "%s\n",
		  wlp_dev_category_str(wlp->dev_info->prim_dev_type.category));
out:
	mutex_unlock(&wlp->mutex);
	return result;
}
EXPORT_SYMBOL_GPL(wlp_dev_prim_category_show);
424
425ssize_t wlp_dev_prim_category_store(struct wlp *wlp, const char *buf,
426 size_t size)
427{
428 ssize_t result;
429 u16 cat;
430 mutex_lock(&wlp->mutex);
431 if (wlp->dev_info == NULL) {
432 result = __wlp_alloc_device_info(wlp);
433 if (result < 0)
434 goto out;
435 }
436 result = sscanf(buf, "%hu", &cat);
437 if ((cat >= WLP_DEV_CAT_COMPUTER && cat <= WLP_DEV_CAT_TELEPHONE)
438 || cat == WLP_DEV_CAT_OTHER)
439 wlp->dev_info->prim_dev_type.category = cat;
440 else
441 result = -EINVAL;
442out:
443 mutex_unlock(&wlp->mutex);
444 return result < 0 ? result : size;
445}
446EXPORT_SYMBOL_GPL(wlp_dev_prim_category_store);
447
/* Show the primary device type OUI as "xx:xx:xx". */
ssize_t wlp_dev_prim_OUI_show(struct wlp *wlp, char *buf)
{
	ssize_t result = 0;
	mutex_lock(&wlp->mutex);
	/* Device info is created lazily on first access. */
	if (wlp->dev_info == NULL) {
		result = __wlp_setup_device_info(wlp);
		if (result < 0)
			goto out;
	}
	result = scnprintf(buf, PAGE_SIZE, "%02x:%02x:%02x\n",
			   wlp->dev_info->prim_dev_type.OUI[0],
			   wlp->dev_info->prim_dev_type.OUI[1],
			   wlp->dev_info->prim_dev_type.OUI[2]);
out:
	mutex_unlock(&wlp->mutex);
	return result;
}
EXPORT_SYMBOL_GPL(wlp_dev_prim_OUI_show);
466
/*
 * Store the primary device type OUI, expected as "xx:xx:xx" hex bytes.
 * Parses into a local array first so the stored OUI is only updated when
 * all three bytes scanned successfully; otherwise returns -EINVAL.
 */
ssize_t wlp_dev_prim_OUI_store(struct wlp *wlp, const char *buf, size_t size)
{
	ssize_t result;
	u8 OUI[3];
	mutex_lock(&wlp->mutex);
	if (wlp->dev_info == NULL) {
		result = __wlp_alloc_device_info(wlp);
		if (result < 0)
			goto out;
	}
	result = sscanf(buf, "%hhx:%hhx:%hhx",
			&OUI[0], &OUI[1], &OUI[2]);
	if (result != 3) {
		result = -EINVAL;
		goto out;
	} else
		memcpy(wlp->dev_info->prim_dev_type.OUI, OUI, sizeof(OUI));
out:
	mutex_unlock(&wlp->mutex);
	return result < 0 ? result : size;
}
EXPORT_SYMBOL_GPL(wlp_dev_prim_OUI_store);
489
/* Show the primary device type OUI subdivision as a decimal value. */
ssize_t wlp_dev_prim_OUI_sub_show(struct wlp *wlp, char *buf)
{
	ssize_t result = 0;
	mutex_lock(&wlp->mutex);
	/* Device info is created lazily on first access. */
	if (wlp->dev_info == NULL) {
		result = __wlp_setup_device_info(wlp);
		if (result < 0)
			goto out;
	}
	result = scnprintf(buf, PAGE_SIZE, "%u\n",
			   wlp->dev_info->prim_dev_type.OUIsubdiv);
out:
	mutex_unlock(&wlp->mutex);
	return result;
}
EXPORT_SYMBOL_GPL(wlp_dev_prim_OUI_sub_show);
507
508ssize_t wlp_dev_prim_OUI_sub_store(struct wlp *wlp, const char *buf,
509 size_t size)
510{
511 ssize_t result;
512 unsigned sub;
513 u8 max_sub = ~0;
514 mutex_lock(&wlp->mutex);
515 if (wlp->dev_info == NULL) {
516 result = __wlp_alloc_device_info(wlp);
517 if (result < 0)
518 goto out;
519 }
520 result = sscanf(buf, "%u", &sub);
521 if (sub <= max_sub)
522 wlp->dev_info->prim_dev_type.OUIsubdiv = sub;
523 else
524 result = -EINVAL;
525out:
526 mutex_unlock(&wlp->mutex);
527 return result < 0 ? result : size;
528}
529EXPORT_SYMBOL_GPL(wlp_dev_prim_OUI_sub_store);
530
/* Show the primary device type subcategory ID as a decimal value. */
ssize_t wlp_dev_prim_subcat_show(struct wlp *wlp, char *buf)
{
	ssize_t result = 0;
	mutex_lock(&wlp->mutex);
	/* Device info is created lazily on first access. */
	if (wlp->dev_info == NULL) {
		result = __wlp_setup_device_info(wlp);
		if (result < 0)
			goto out;
	}
	result = scnprintf(buf, PAGE_SIZE, "%u\n",
			   wlp->dev_info->prim_dev_type.subID);
out:
	mutex_unlock(&wlp->mutex);
	return result;
}
EXPORT_SYMBOL_GPL(wlp_dev_prim_subcat_show);
547
548ssize_t wlp_dev_prim_subcat_store(struct wlp *wlp, const char *buf,
549 size_t size)
550{
551 ssize_t result;
552 unsigned sub;
553 __le16 max_sub = ~0;
554 mutex_lock(&wlp->mutex);
555 if (wlp->dev_info == NULL) {
556 result = __wlp_alloc_device_info(wlp);
557 if (result < 0)
558 goto out;
559 }
560 result = sscanf(buf, "%u", &sub);
561 if (sub <= max_sub)
562 wlp->dev_info->prim_dev_type.subID = sub;
563 else
564 result = -EINVAL;
565out:
566 mutex_unlock(&wlp->mutex);
567 return result < 0 ? result : size;
568}
569EXPORT_SYMBOL_GPL(wlp_dev_prim_subcat_store);
570
571/**
572 * Subsystem implementation for interaction with individual WSS via sysfs
573 *
574 * Followed instructions for subsystem in Documentation/filesystems/sysfs.txt
575 */
576
577#define kobj_to_wlp_wss(obj) container_of(obj, struct wlp_wss, kobj)
578#define attr_to_wlp_wss_attr(_attr) \
579 container_of(_attr, struct wlp_wss_attribute, attr)
580
581/**
582 * Sysfs subsystem: forward read calls
583 *
584 * Sysfs operation for forwarding read call to the show method of the
585 * attribute owner
586 */
587static
588ssize_t wlp_wss_attr_show(struct kobject *kobj, struct attribute *attr,
589 char *buf)
590{
591 struct wlp_wss_attribute *wss_attr = attr_to_wlp_wss_attr(attr);
592 struct wlp_wss *wss = kobj_to_wlp_wss(kobj);
593 ssize_t ret = -EIO;
594
595 if (wss_attr->show)
596 ret = wss_attr->show(wss, buf);
597 return ret;
598}
599/**
600 * Sysfs subsystem: forward write calls
601 *
602 * Sysfs operation for forwarding write call to the store method of the
603 * attribute owner
604 */
605static
606ssize_t wlp_wss_attr_store(struct kobject *kobj, struct attribute *attr,
607 const char *buf, size_t count)
608{
609 struct wlp_wss_attribute *wss_attr = attr_to_wlp_wss_attr(attr);
610 struct wlp_wss *wss = kobj_to_wlp_wss(kobj);
611 ssize_t ret = -EIO;
612
613 if (wss_attr->store)
614 ret = wss_attr->store(wss, buf, count);
615 return ret;
616}
617
/* Sysfs ops forwarding reads/writes to the WSS attribute callbacks. */
static
struct sysfs_ops wss_sysfs_ops = {
	.show	= wlp_wss_attr_show,
	.store	= wlp_wss_attr_store,
};

/* kobject type for a WSS; wlp_wss_release frees the enclosing object. */
struct kobj_type wss_ktype = {
	.release	= wlp_wss_release,
	.sysfs_ops	= &wss_sysfs_ops,
};
628
629
630/**
631 * Sysfs files for individual WSS
632 */
633
634/**
635 * Print static properties of this WSS
636 *
637 * The name of a WSS may not be null teminated. It's max size is 64 bytes
638 * so we copy it to a larger array just to make sure we print sane data.
639 */
640static ssize_t wlp_wss_properties_show(struct wlp_wss *wss, char *buf)
641{
642 int result = 0;
643
644 if (mutex_lock_interruptible(&wss->mutex))
645 goto out;
646 result = __wlp_wss_properties_show(wss, buf, PAGE_SIZE);
647 mutex_unlock(&wss->mutex);
648out:
649 return result;
650}
651WSS_ATTR(properties, S_IRUGO, wlp_wss_properties_show, NULL);
652
/**
 * Print all connected members of this WSS
 * The EDA cache contains all members of WSS neighborhood.
 */
static ssize_t wlp_wss_members_show(struct wlp_wss *wss, char *buf)
{
	/* The WSS is embedded in struct wlp - recover the container. */
	struct wlp *wlp = container_of(wss, struct wlp, wss);
	return wlp_eda_show(wlp, buf);
}
WSS_ATTR(members, S_IRUGO, wlp_wss_members_show, NULL);
663
/* Display strings for the WLP_WSS_STATE_* values, indexed by state. */
static
const char *__wlp_strstate[] = {
	"none",
	"partially enrolled",
	"enrolled",
	"active",
	"connected",
};

/* Map a WSS state to its display string; out of range -> "unknown state". */
static const char *wlp_wss_strstate(unsigned state)
{
	return state < ARRAY_SIZE(__wlp_strstate)
		? __wlp_strstate[state] : "unknown state";
}
679
680/*
681 * Print current state of this WSS
682 */
683static ssize_t wlp_wss_state_show(struct wlp_wss *wss, char *buf)
684{
685 int result = 0;
686
687 if (mutex_lock_interruptible(&wss->mutex))
688 goto out;
689 result = scnprintf(buf, PAGE_SIZE, "%s\n",
690 wlp_wss_strstate(wss->state));
691 mutex_unlock(&wss->mutex);
692out:
693 return result;
694}
695WSS_ATTR(state, S_IRUGO, wlp_wss_state_show, NULL);
696
697
/* All per-WSS sysfs attributes, registered as one group. */
static
struct attribute *wss_attrs[] = {
	&wss_attr_properties.attr,
	&wss_attr_members.attr,
	&wss_attr_state.attr,
	NULL,
};

struct attribute_group wss_attr_group = {
	.name = NULL,	/* we want them in the same directory */
	.attrs = wss_attrs,
};
diff --git a/drivers/uwb/wlp/txrx.c b/drivers/uwb/wlp/txrx.c
new file mode 100644
index 000000000000..c701bd1a2887
--- /dev/null
+++ b/drivers/uwb/wlp/txrx.c
@@ -0,0 +1,374 @@
1/*
2 * WiMedia Logical Link Control Protocol (WLP)
3 * Message exchange infrastructure
4 *
5 * Copyright (C) 2007 Intel Corporation
6 * Reinette Chatre <reinette.chatre@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * FIXME: Docs
24 *
25 */
26
27#include <linux/etherdevice.h>
28#include <linux/wlp.h>
29#define D_LOCAL 5
30#include <linux/uwb/debug.h>
31#include "wlp-internal.h"
32
33
/**
 * Direct incoming association msg to correct parsing routine
 *
 * We only expect D1, E1, C1, C3 messages as new. All other incoming
 * association messages should form part of an established session that is
 * handled elsewhere.
 * The handling of these messages often require calling sleeping functions
 * - this cannot be done in interrupt context. We use the kernel's
 * workqueue to handle these messages.
 *
 * Ownership: @skb (and the frame_ctx wrapping it) is handed to the
 * scheduled work item on the D1/C1/C3 paths; on every other path both
 * are freed here.
 */
static
void wlp_direct_assoc_frame(struct wlp *wlp, struct sk_buff *skb,
			    struct uwb_dev_addr *src)
{
	struct device *dev = &wlp->rc->uwb_dev.dev;
	struct wlp_frame_assoc *assoc = (void *) skb->data;
	struct wlp_assoc_frame_ctx *frame_ctx;
	d_fnstart(5, dev, "wlp %p, skb %p\n", wlp, skb);
	/* GFP_ATOMIC: may be called from non-sleeping (rx) context. */
	frame_ctx = kmalloc(sizeof(*frame_ctx), GFP_ATOMIC);
	if (frame_ctx == NULL) {
		dev_err(dev, "WLP: Unable to allocate memory for association "
			"frame handling.\n");
		kfree_skb(skb);
		goto out;
	}
	frame_ctx->wlp = wlp;
	frame_ctx->skb = skb;
	frame_ctx->src = *src;
	switch (assoc->type) {
	case WLP_ASSOC_D1:
		d_printf(5, dev, "Received a D1 frame.\n");
		INIT_WORK(&frame_ctx->ws, wlp_handle_d1_frame);
		schedule_work(&frame_ctx->ws);
		break;
	case WLP_ASSOC_E1:
		d_printf(5, dev, "Received a E1 frame. FIXME?\n");
		kfree_skb(skb); /* Temporary until we handle it */
		kfree(frame_ctx); /* Temporary until we handle it */
		break;
	case WLP_ASSOC_C1:
		d_printf(5, dev, "Received a C1 frame.\n");
		INIT_WORK(&frame_ctx->ws, wlp_handle_c1_frame);
		schedule_work(&frame_ctx->ws);
		break;
	case WLP_ASSOC_C3:
		d_printf(5, dev, "Received a C3 frame.\n");
		INIT_WORK(&frame_ctx->ws, wlp_handle_c3_frame);
		schedule_work(&frame_ctx->ws);
		break;
	default:
		dev_err(dev, "Received unexpected association frame. "
			"Type = %d \n", assoc->type);
		kfree_skb(skb);
		kfree(frame_ctx);
		break;
	}
out:
	d_fnend(5, dev, "wlp %p\n", wlp);
}
93
/**
 * Process incoming association frame
 *
 * Although it could be possible to deal with some incoming association
 * messages without creating a new session we are keeping things simple. We
 * do not accept new association messages if there is a session in progress
 * and the messages do not belong to that session.
 *
 * If an association message arrives that causes the creation of a session
 * (WLP_ASSOC_E1) while we are in the process of creating a session then we
 * rely on the neighbor mutex to protect the data. That is, the new session
 * will not be started until the previous is completed.
 *
 * Ownership: on the session path @skb is handed to the session callback
 * via session->data; on the no-session path it is handed to
 * wlp_direct_assoc_frame(); on every error path it is freed here.
 */
static
void wlp_receive_assoc_frame(struct wlp *wlp, struct sk_buff *skb,
			     struct uwb_dev_addr *src)
{
	struct device *dev = &wlp->rc->uwb_dev.dev;
	struct wlp_frame_assoc *assoc = (void *) skb->data;
	struct wlp_session *session = wlp->session;
	u8 version;
	d_fnstart(5, dev, "wlp %p, skb %p\n", wlp, skb);

	if (wlp_get_version(wlp, &assoc->version, &version,
			    sizeof(assoc->version)) < 0)
		goto error;
	if (version != WLP_VERSION) {
		dev_err(dev, "Unsupported WLP version in association "
			"message.\n");
		goto error;
	}
	if (session != NULL) {
		/* Function that created this session is still holding the
		 * &wlp->mutex to protect this session. */
		/* F0 (error) frames are accepted at any point in a session. */
		if (assoc->type == session->exp_message ||
		    assoc->type == WLP_ASSOC_F0) {
			if (!memcmp(&session->neighbor_addr, src,
				    sizeof(*src))) {
				session->data = skb;
				(session->cb)(wlp);
			} else {
				dev_err(dev, "Received expected message from "
					"unexpected source.  Expected message "
					"%d or F0 from %02x:%02x, but received "
					"it from %02x:%02x. Dropping.\n",
					session->exp_message,
					session->neighbor_addr.data[1],
					session->neighbor_addr.data[0],
					src->data[1], src->data[0]);
				goto error;
			}
		} else {
			dev_err(dev, "Association already in progress. "
				"Dropping.\n");
			goto error;
		}
	} else {
		wlp_direct_assoc_frame(wlp, skb, src);
	}
	d_fnend(5, dev, "wlp %p\n", wlp);
	return;
error:
	kfree_skb(skb);
	d_fnend(5, dev, "wlp %p\n", wlp);
}
159
160/**
161 * Verify incoming frame is from connected neighbor, prep to pass to WLP client
162 *
163 * Verification proceeds according to WLP 0.99 [7.3.1]. The source address
164 * is used to determine which neighbor is sending the frame and the WSS tag
165 * is used to know to which WSS the frame belongs (we only support one WSS
166 * so this test is straight forward).
167 * With the WSS found we need to ensure that we are connected before
168 * allowing the exchange of data frames.
169 */
170static
171int wlp_verify_prep_rx_frame(struct wlp *wlp, struct sk_buff *skb,
172 struct uwb_dev_addr *src)
173{
174 struct device *dev = &wlp->rc->uwb_dev.dev;
175 int result = -EINVAL;
176 struct wlp_eda_node eda_entry;
177 struct wlp_frame_std_abbrv_hdr *hdr = (void *) skb->data;
178
179 d_fnstart(6, dev, "wlp %p, skb %p \n", wlp, skb);
180 /*verify*/
181 result = wlp_copy_eda_node(&wlp->eda, src, &eda_entry);
182 if (result < 0) {
183 if (printk_ratelimit())
184 dev_err(dev, "WLP: Incoming frame is from unknown "
185 "neighbor %02x:%02x.\n", src->data[1],
186 src->data[0]);
187 goto out;
188 }
189 if (hdr->tag != eda_entry.tag) {
190 if (printk_ratelimit())
191 dev_err(dev, "WLP: Tag of incoming frame from "
192 "%02x:%02x does not match expected tag. "
193 "Received 0x%02x, expected 0x%02x. \n",
194 src->data[1], src->data[0], hdr->tag,
195 eda_entry.tag);
196 result = -EINVAL;
197 goto out;
198 }
199 if (eda_entry.state != WLP_WSS_CONNECTED) {
200 if (printk_ratelimit())
201 dev_err(dev, "WLP: Incoming frame from "
202 "%02x:%02x does is not from connected WSS.\n",
203 src->data[1], src->data[0]);
204 result = -EINVAL;
205 goto out;
206 }
207 /*prep*/
208 skb_pull(skb, sizeof(*hdr));
209out:
210 d_fnend(6, dev, "wlp %p, skb %p, result = %d \n", wlp, skb, result);
211 return result;
212}
213
214/**
215 * Receive a WLP frame from device
216 *
217 * @returns: 1 if calling function should free the skb
218 * 0 if it successfully handled skb and freed it
219 * 0 if error occured, will free skb in this case
220 */
221int wlp_receive_frame(struct device *dev, struct wlp *wlp, struct sk_buff *skb,
222 struct uwb_dev_addr *src)
223{
224 unsigned len = skb->len;
225 void *ptr = skb->data;
226 struct wlp_frame_hdr *hdr;
227 int result = 0;
228
229 d_fnstart(6, dev, "skb (%p), len (%u)\n", skb, len);
230 if (len < sizeof(*hdr)) {
231 dev_err(dev, "Not enough data to parse WLP header.\n");
232 result = -EINVAL;
233 goto out;
234 }
235 hdr = ptr;
236 d_dump(6, dev, hdr, sizeof(*hdr));
237 if (le16_to_cpu(hdr->mux_hdr) != WLP_PROTOCOL_ID) {
238 dev_err(dev, "Not a WLP frame type.\n");
239 result = -EINVAL;
240 goto out;
241 }
242 switch (hdr->type) {
243 case WLP_FRAME_STANDARD:
244 if (len < sizeof(struct wlp_frame_std_abbrv_hdr)) {
245 dev_err(dev, "Not enough data to parse Standard "
246 "WLP header.\n");
247 goto out;
248 }
249 result = wlp_verify_prep_rx_frame(wlp, skb, src);
250 if (result < 0) {
251 if (printk_ratelimit())
252 dev_err(dev, "WLP: Verification of frame "
253 "from neighbor %02x:%02x failed.\n",
254 src->data[1], src->data[0]);
255 goto out;
256 }
257 result = 1;
258 break;
259 case WLP_FRAME_ABBREVIATED:
260 dev_err(dev, "Abbreviated frame received. FIXME?\n");
261 kfree_skb(skb);
262 break;
263 case WLP_FRAME_CONTROL:
264 dev_err(dev, "Control frame received. FIXME?\n");
265 kfree_skb(skb);
266 break;
267 case WLP_FRAME_ASSOCIATION:
268 if (len < sizeof(struct wlp_frame_assoc)) {
269 dev_err(dev, "Not enough data to parse Association "
270 "WLP header.\n");
271 goto out;
272 }
273 d_printf(5, dev, "Association frame received.\n");
274 wlp_receive_assoc_frame(wlp, skb, src);
275 break;
276 default:
277 dev_err(dev, "Invalid frame received.\n");
278 result = -EINVAL;
279 break;
280 }
281out:
282 if (result < 0) {
283 kfree_skb(skb);
284 result = 0;
285 }
286 d_fnend(6, dev, "skb (%p)\n", skb);
287 return result;
288}
289EXPORT_SYMBOL_GPL(wlp_receive_frame);
290
291
/**
 * Verify frame from network stack, prepare for further transmission
 *
 * @skb:   the socket buffer that needs to be prepared for transmission (it
 *         is in need of a WLP header). If this is a broadcast frame we take
 *         over the entire transmission.
 *         If it is a unicast the WSS connection should already be established
 *         and transmission will be done by the calling function.
 * @dst:   On return this will contain the device address to which the
 *         frame is destined.
 * @returns: 0 on success no tx : WLP header sucessfully applied to skb buffer,
 *                                calling function can proceed with tx
 *           1 on success with tx : WLP will take over transmission of this
 *                                  frame
 *           <0 on error
 *
 * The network stack (WLP client) is attempting to transmit a frame. We can
 * only transmit data if a local WSS is at least active (connection will be
 * done here if this is a broadcast frame and neighbor also has the WSS
 * active).
 *
 * The frame can be either broadcast or unicast. Broadcast in a WSS is
 * supported via multicast, but we don't support multicast yet (until
 * devices start to support MAB IEs). If a broadcast frame needs to be
 * transmitted it is treated as a unicast frame to each neighbor. In this
 * case the WLP takes over transmission of the skb and returns 1
 * to the caller to indicate so. Also, in this case, if a neighbor has the
 * same WSS activated but is not connected then the WSS connection will be
 * done at this time. The neighbor's virtual address will be learned at
 * this time.
 *
 * The destination address in a unicast frame is the virtual address of the
 * neighbor. This address only becomes known when a WSS connection is
 * established. We thus rely on a broadcast frame to trigger the setup of
 * WSS connections to all neighbors before we are able to send unicast
 * frames to them. This seems reasonable as IP would usually use ARP first
 * before any unicast frames are sent.
 *
 * If we are already connected to the neighbor (neighbor's virtual address
 * is known) we just prepare the WLP header and the caller will continue to
 * send the frame.
 *
 * A failure in this function usually indicates something that cannot be
 * fixed automatically. So, if this function fails (@return < 0) the calling
 * function should not retry to send the frame as it will very likely keep
 * failing.
 *
 */
int wlp_prepare_tx_frame(struct device *dev, struct wlp *wlp,
			 struct sk_buff *skb, struct uwb_dev_addr *dst)
{
	int result = -EINVAL;
	struct ethhdr *eth_hdr = (void *) skb->data;

	d_fnstart(6, dev, "wlp (%p), skb (%p) \n", wlp, skb);
	if (is_broadcast_ether_addr(eth_hdr->h_dest)) {
		d_printf(6, dev, "WLP: handling broadcast frame. \n");
		/* Copy the frame once per connected neighbor in the EDA
		 * cache - WLP owns the original skb from here. */
		result = wlp_eda_for_each(&wlp->eda, wlp_wss_send_copy, skb);
		if (result < 0) {
			if (printk_ratelimit())
				dev_err(dev, "Unable to handle broadcast "
					"frame from WLP client.\n");
			goto out;
		}
		/* _irq variant: may run with interrupts disabled (tx path). */
		dev_kfree_skb_irq(skb);
		result = 1;
		/* Frame will be transmitted by WLP. */
	} else {
		d_printf(6, dev, "WLP: handling unicast frame. \n");
		/* Look up the neighbor by virtual address and let
		 * wlp_wss_prep_hdr() attach the WLP header in place. */
		result = wlp_eda_for_virtual(&wlp->eda, eth_hdr->h_dest, dst,
					     wlp_wss_prep_hdr, skb);
		if (unlikely(result < 0)) {
			if (printk_ratelimit())
				dev_err(dev, "Unable to prepare "
					"skb for transmission. \n");
			goto out;
		}
	}
out:
	d_fnend(6, dev, "wlp (%p), skb (%p). result = %d \n", wlp, skb, result);
	return result;
}
EXPORT_SYMBOL_GPL(wlp_prepare_tx_frame);
diff --git a/drivers/uwb/wlp/wlp-internal.h b/drivers/uwb/wlp/wlp-internal.h
new file mode 100644
index 000000000000..1c94fabfb1a7
--- /dev/null
+++ b/drivers/uwb/wlp/wlp-internal.h
@@ -0,0 +1,228 @@
1/*
2 * WiMedia Logical Link Control Protocol (WLP)
3 * Internal API
4 *
5 * Copyright (C) 2007 Intel Corporation
6 * Reinette Chatre <reinette.chatre@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 */
23
24#ifndef __WLP_INTERNAL_H__
25#define __WLP_INTERNAL_H__
26
/**
 * State of WSS connection
 *
 * A device needs to connect to a neighbor in an activated WSS before data
 * can be transmitted. The spec also distinguishes between a new connection
 * attempt and a connection attempt after previous connection attempts. The
 * state WLP_WSS_CONNECT_FAILED is used for this scenario. See WLP 0.99
 * [7.2.6]
 */
enum wlp_wss_connect {
	WLP_WSS_UNCONNECTED = 0,	/* no connection established yet */
	WLP_WSS_CONNECTED,		/* connection to neighbor established */
	WLP_WSS_CONNECT_FAILED,		/* a previous connection attempt failed */
};
41
42extern struct kobj_type wss_ktype;
43extern struct attribute_group wss_attr_group;
44
45extern int uwb_rc_ie_add(struct uwb_rc *, const struct uwb_ie_hdr *, size_t);
46extern int uwb_rc_ie_rm(struct uwb_rc *, enum uwb_ie);
47
48
49/* This should be changed to a dynamic array where entries are sorted
50 * by eth_addr and search is done in a binary form
51 *
 52 * Although thinking twice about it: this technology's maximum reach
 53 * is 10 meters...unless you want to pack too much stuff in around
 54 * your radio controller/WLP device, the list will probably not be
 55 * too big.
 56 *
 57 * In any case, there is probably some data structure in the kernel
 58 * that we could reuse for that already.
59 *
60 * The below structure is really just good while we support one WSS per
61 * host.
62 */
/*
 * One entry of the EDA cache: associates a neighbor's Ethernet address
 * with its UWB device address, the WSS used to reach it and the state
 * of the WSS connection to it.
 */
struct wlp_eda_node {
	struct list_head list_node;		/* link in the EDA cache list */
	unsigned char eth_addr[ETH_ALEN];	/* neighbor's Ethernet address */
	struct uwb_dev_addr dev_addr;		/* neighbor's UWB device address */
	struct wlp_wss *wss;			/* WSS shared with this neighbor */
	unsigned char virt_addr[ETH_ALEN];	/* neighbor's virtual address */
	u8 tag;		/* presumably the WLP tag used in frames to this
			 * neighbor -- confirm against frame building code */
	enum wlp_wss_connect state;		/* WSS connection state */
};
72
73typedef int (*wlp_eda_for_each_f)(struct wlp *, struct wlp_eda_node *, void *);
74
75extern void wlp_eda_init(struct wlp_eda *);
76extern void wlp_eda_release(struct wlp_eda *);
77extern int wlp_eda_create_node(struct wlp_eda *,
78 const unsigned char eth_addr[ETH_ALEN],
79 const struct uwb_dev_addr *);
80extern void wlp_eda_rm_node(struct wlp_eda *, const struct uwb_dev_addr *);
81extern int wlp_eda_update_node(struct wlp_eda *,
82 const struct uwb_dev_addr *,
83 struct wlp_wss *,
84 const unsigned char virt_addr[ETH_ALEN],
85 const u8, const enum wlp_wss_connect);
86extern int wlp_eda_update_node_state(struct wlp_eda *,
87 const struct uwb_dev_addr *,
88 const enum wlp_wss_connect);
89
90extern int wlp_copy_eda_node(struct wlp_eda *, struct uwb_dev_addr *,
91 struct wlp_eda_node *);
92extern int wlp_eda_for_each(struct wlp_eda *, wlp_eda_for_each_f , void *);
93extern int wlp_eda_for_virtual(struct wlp_eda *,
94 const unsigned char eth_addr[ETH_ALEN],
95 struct uwb_dev_addr *,
96 wlp_eda_for_each_f , void *);
97
98
99extern void wlp_remove_neighbor_tmp_info(struct wlp_neighbor_e *);
100
101extern size_t wlp_wss_key_print(char *, size_t, u8 *);
102
103/* Function called when no more references to WSS exists */
104extern void wlp_wss_release(struct kobject *);
105
106extern void wlp_wss_reset(struct wlp_wss *);
107extern int wlp_wss_create_activate(struct wlp_wss *, struct wlp_uuid *,
108 char *, unsigned, unsigned);
109extern int wlp_wss_enroll_activate(struct wlp_wss *, struct wlp_uuid *,
110 struct uwb_dev_addr *);
111extern ssize_t wlp_discover(struct wlp *);
112
113extern int wlp_enroll_neighbor(struct wlp *, struct wlp_neighbor_e *,
114 struct wlp_wss *, struct wlp_uuid *);
115extern int wlp_wss_is_active(struct wlp *, struct wlp_wss *,
116 struct uwb_dev_addr *);
117
/*
 * Context handed to a deferred work item (@ws) that sets up a WSS
 * connection: the WLP instance, the skb being handled (presumably
 * transmitted once connected -- confirm with the work function) and a
 * by-value copy of the neighbor's EDA entry.
 */
struct wlp_assoc_conn_ctx {
	struct work_struct ws;		/* deferred connection work item */
	struct wlp *wlp;
	struct sk_buff *skb;
	struct wlp_eda_node eda_entry;	/* copy of neighbor's EDA entry */
};
124
125
126extern int wlp_wss_connect_prep(struct wlp *, struct wlp_eda_node *, void *);
127extern int wlp_wss_send_copy(struct wlp *, struct wlp_eda_node *, void *);
128
129
/* Message handling */
/*
 * Context handed to a deferred work item (@ws) that processes a
 * received association frame @skb sent by neighbor @src.
 */
struct wlp_assoc_frame_ctx {
	struct work_struct ws;		/* deferred frame-handling work item */
	struct wlp *wlp;
	struct sk_buff *skb;		/* received association frame */
	struct uwb_dev_addr src;	/* sender's UWB device address */
};
137
138extern int wlp_wss_prep_hdr(struct wlp *, struct wlp_eda_node *, void *);
139extern void wlp_handle_d1_frame(struct work_struct *);
140extern int wlp_parse_d2_frame_to_cache(struct wlp *, struct sk_buff *,
141 struct wlp_neighbor_e *);
142extern int wlp_parse_d2_frame_to_enroll(struct wlp_wss *, struct sk_buff *,
143 struct wlp_neighbor_e *,
144 struct wlp_uuid *);
145extern void wlp_handle_c1_frame(struct work_struct *);
146extern void wlp_handle_c3_frame(struct work_struct *);
147extern int wlp_parse_c3c4_frame(struct wlp *, struct sk_buff *,
148 struct wlp_uuid *, u8 *,
149 struct uwb_mac_addr *);
150extern int wlp_parse_f0(struct wlp *, struct sk_buff *);
151extern int wlp_send_assoc_frame(struct wlp *, struct wlp_wss *,
152 struct uwb_dev_addr *, enum wlp_assoc_type);
153extern ssize_t wlp_get_version(struct wlp *, struct wlp_attr_version *,
154 u8 *, ssize_t);
155extern ssize_t wlp_get_wssid(struct wlp *, struct wlp_attr_wssid *,
156 struct wlp_uuid *, ssize_t);
157extern int __wlp_alloc_device_info(struct wlp *);
158extern int __wlp_setup_device_info(struct wlp *);
159
160extern struct wlp_wss_attribute wss_attribute_properties;
161extern struct wlp_wss_attribute wss_attribute_members;
162extern struct wlp_wss_attribute wss_attribute_state;
163
164static inline
165size_t wlp_wss_uuid_print(char *buf, size_t bufsize, struct wlp_uuid *uuid)
166{
167 size_t result;
168
169 result = scnprintf(buf, bufsize,
170 "%02x:%02x:%02x:%02x:%02x:%02x:"
171 "%02x:%02x:%02x:%02x:%02x:%02x:"
172 "%02x:%02x:%02x:%02x",
173 uuid->data[0], uuid->data[1],
174 uuid->data[2], uuid->data[3],
175 uuid->data[4], uuid->data[5],
176 uuid->data[6], uuid->data[7],
177 uuid->data[8], uuid->data[9],
178 uuid->data[10], uuid->data[11],
179 uuid->data[12], uuid->data[13],
180 uuid->data[14], uuid->data[15]);
181 return result;
182}
183
184/**
185 * FIXME: How should a nonce be displayed?
186 */
187static inline
188size_t wlp_wss_nonce_print(char *buf, size_t bufsize, struct wlp_nonce *nonce)
189{
190 size_t result;
191
192 result = scnprintf(buf, bufsize,
193 "%02x %02x %02x %02x %02x %02x "
194 "%02x %02x %02x %02x %02x %02x "
195 "%02x %02x %02x %02x",
196 nonce->data[0], nonce->data[1],
197 nonce->data[2], nonce->data[3],
198 nonce->data[4], nonce->data[5],
199 nonce->data[6], nonce->data[7],
200 nonce->data[8], nonce->data[9],
201 nonce->data[10], nonce->data[11],
202 nonce->data[12], nonce->data[13],
203 nonce->data[14], nonce->data[15]);
204 return result;
205}
206
207
208static inline
209void wlp_session_cb(struct wlp *wlp)
210{
211 struct completion *completion = wlp->session->cb_priv;
212 complete(completion);
213}
214
215static inline
216int wlp_uuid_is_set(struct wlp_uuid *uuid)
217{
218 struct wlp_uuid zero_uuid = { .data = { 0x00, 0x00, 0x00, 0x00,
219 0x00, 0x00, 0x00, 0x00,
220 0x00, 0x00, 0x00, 0x00,
221 0x00, 0x00, 0x00, 0x00} };
222
223 if (!memcmp(uuid, &zero_uuid, sizeof(*uuid)))
224 return 0;
225 return 1;
226}
227
228#endif /* __WLP_INTERNAL_H__ */
diff --git a/drivers/uwb/wlp/wlp-lc.c b/drivers/uwb/wlp/wlp-lc.c
new file mode 100644
index 000000000000..0799402e73fb
--- /dev/null
+++ b/drivers/uwb/wlp/wlp-lc.c
@@ -0,0 +1,585 @@
1/*
2 * WiMedia Logical Link Control Protocol (WLP)
3 *
4 * Copyright (C) 2005-2006 Intel Corporation
5 * Reinette Chatre <reinette.chatre@intel.com>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 * 02110-1301, USA.
20 *
21 *
22 * FIXME: docs
23 */
24
25#include <linux/wlp.h>
26#define D_LOCAL 6
27#include <linux/uwb/debug.h>
28#include "wlp-internal.h"
29
30
31static
32void wlp_neighbor_init(struct wlp_neighbor_e *neighbor)
33{
34 INIT_LIST_HEAD(&neighbor->wssid);
35}
36
37/**
38 * Create area for device information storage
39 *
40 * wlp->mutex must be held
41 */
42int __wlp_alloc_device_info(struct wlp *wlp)
43{
44 struct device *dev = &wlp->rc->uwb_dev.dev;
45 BUG_ON(wlp->dev_info != NULL);
46 wlp->dev_info = kzalloc(sizeof(struct wlp_device_info), GFP_KERNEL);
47 if (wlp->dev_info == NULL) {
48 dev_err(dev, "WLP: Unable to allocate memory for "
49 "device information.\n");
50 return -ENOMEM;
51 }
52 return 0;
53}
54
55
56/**
57 * Fill in device information using function provided by driver
58 *
59 * wlp->mutex must be held
60 */
61static
62void __wlp_fill_device_info(struct wlp *wlp)
63{
64 struct device *dev = &wlp->rc->uwb_dev.dev;
65
66 BUG_ON(wlp->fill_device_info == NULL);
67 d_printf(6, dev, "Retrieving device information "
68 "from device driver.\n");
69 wlp->fill_device_info(wlp, wlp->dev_info);
70}
71
72/**
73 * Setup device information
74 *
75 * Allocate area for device information and populate it.
76 *
77 * wlp->mutex must be held
78 */
79int __wlp_setup_device_info(struct wlp *wlp)
80{
81 int result;
82 struct device *dev = &wlp->rc->uwb_dev.dev;
83
84 result = __wlp_alloc_device_info(wlp);
85 if (result < 0) {
86 dev_err(dev, "WLP: Unable to allocate area for "
87 "device information.\n");
88 return result;
89 }
90 __wlp_fill_device_info(wlp);
91 return 0;
92}
93
94/**
95 * Remove information about neighbor stored temporarily
96 *
97 * Information learned during discovey should only be stored when the
98 * device enrolls in the neighbor's WSS. We do need to store this
99 * information temporarily in order to present it to the user.
100 *
101 * We are only interested in keeping neighbor WSS information if that
102 * neighbor is accepting enrollment.
103 *
104 * should be called with wlp->nbmutex held
105 */
106void wlp_remove_neighbor_tmp_info(struct wlp_neighbor_e *neighbor)
107{
108 struct wlp_wssid_e *wssid_e, *next;
109 u8 keep;
110 if (!list_empty(&neighbor->wssid)) {
111 list_for_each_entry_safe(wssid_e, next, &neighbor->wssid,
112 node) {
113 if (wssid_e->info != NULL) {
114 keep = wssid_e->info->accept_enroll;
115 kfree(wssid_e->info);
116 wssid_e->info = NULL;
117 if (!keep) {
118 list_del(&wssid_e->node);
119 kfree(wssid_e);
120 }
121 }
122 }
123 }
124 if (neighbor->info != NULL) {
125 kfree(neighbor->info);
126 neighbor->info = NULL;
127 }
128}
129
130/**
131 * Populate WLP neighborhood cache with neighbor information
132 *
133 * A new neighbor is found. If it is discoverable then we add it to the
134 * neighborhood cache.
135 *
136 */
137static
138int wlp_add_neighbor(struct wlp *wlp, struct uwb_dev *dev)
139{
140 int result = 0;
141 int discoverable;
142 struct wlp_neighbor_e *neighbor;
143
144 d_fnstart(6, &dev->dev, "uwb %p \n", dev);
145 d_printf(6, &dev->dev, "Found neighbor device %02x:%02x \n",
146 dev->dev_addr.data[1], dev->dev_addr.data[0]);
147 /**
148 * FIXME:
149 * Use contents of WLP IE found in beacon cache to determine if
150 * neighbor is discoverable.
151 * The device does not support WLP IE yet so this still needs to be
152 * done. Until then we assume all devices are discoverable.
153 */
154 discoverable = 1; /* will be changed when FIXME disappears */
155 if (discoverable) {
156 /* Add neighbor to cache for discovery */
157 neighbor = kzalloc(sizeof(*neighbor), GFP_KERNEL);
158 if (neighbor == NULL) {
159 dev_err(&dev->dev, "Unable to create memory for "
160 "new neighbor. \n");
161 result = -ENOMEM;
162 goto error_no_mem;
163 }
164 wlp_neighbor_init(neighbor);
165 uwb_dev_get(dev);
166 neighbor->uwb_dev = dev;
167 list_add(&neighbor->node, &wlp->neighbors);
168 }
169error_no_mem:
170 d_fnend(6, &dev->dev, "uwb %p, result = %d \n", dev, result);
171 return result;
172}
173
174/**
175 * Remove one neighbor from cache
176 */
177static
178void __wlp_neighbor_release(struct wlp_neighbor_e *neighbor)
179{
180 struct wlp_wssid_e *wssid_e, *next_wssid_e;
181
182 list_for_each_entry_safe(wssid_e, next_wssid_e,
183 &neighbor->wssid, node) {
184 list_del(&wssid_e->node);
185 kfree(wssid_e);
186 }
187 uwb_dev_put(neighbor->uwb_dev);
188 list_del(&neighbor->node);
189 kfree(neighbor);
190}
191
192/**
193 * Clear entire neighborhood cache.
194 */
195static
196void __wlp_neighbors_release(struct wlp *wlp)
197{
198 struct wlp_neighbor_e *neighbor, *next;
199 if (list_empty(&wlp->neighbors))
200 return;
201 list_for_each_entry_safe(neighbor, next, &wlp->neighbors, node) {
202 __wlp_neighbor_release(neighbor);
203 }
204}
205
206static
207void wlp_neighbors_release(struct wlp *wlp)
208{
209 mutex_lock(&wlp->nbmutex);
210 __wlp_neighbors_release(wlp);
211 mutex_unlock(&wlp->nbmutex);
212}
213
214
215
/**
 * Send D1 message to neighbor, receive D2 message
 *
 * @neighbor: neighbor to which D1 message will be sent
 * @wss:      if not NULL, it is an enrollment request for this WSS
 * @wssid:    if wss not NULL, this is the wssid of the WSS in which we
 *            want to enroll
 *
 * A D1/D2 exchange is done for one of two reasons: discovery or
 * enrollment. If done for discovery the D1 message is sent to the neighbor
 * and the contents of the D2 response is stored in a temporary cache.
 * If done for enrollment the @wss and @wssid are provided also. In this
 * case the D1 message is sent to the neighbor, the D2 response is parsed
 * for enrollment of the WSS with wssid.
 *
 * &wss->mutex is held
 *
 * Returns 0 on success; -ENXIO if our UUID is unset, -ETIMEDOUT on no
 * response, -EINVAL when the neighbor answered with an F0 (error) frame
 * or the response could not be parsed.
 */
static
int wlp_d1d2_exchange(struct wlp *wlp, struct wlp_neighbor_e *neighbor,
		      struct wlp_wss *wss, struct wlp_uuid *wssid)
{
	int result;
	struct device *dev = &wlp->rc->uwb_dev.dev;
	DECLARE_COMPLETION_ONSTACK(completion);
	struct wlp_session session;
	struct sk_buff *skb;
	struct wlp_frame_assoc *resp;
	struct uwb_dev_addr *dev_addr = &neighbor->uwb_dev->dev_addr;

	/* wlp->mutex serializes association exchanges: only one session
	 * (wlp->session) may exist at a time (see BUG_ON below). */
	mutex_lock(&wlp->mutex);
	if (!wlp_uuid_is_set(&wlp->uuid)) {
		dev_err(dev, "WLP: UUID is not set. Set via sysfs to "
			"proceed.\n");
		result = -ENXIO;
		goto out;
	}
	/* Send D1 association frame */
	result = wlp_send_assoc_frame(wlp, wss, dev_addr, WLP_ASSOC_D1);
	if (result < 0) {
		dev_err(dev, "Unable to send D1 frame to neighbor "
			"%02x:%02x (%d)\n", dev_addr->data[1],
			dev_addr->data[0], result);
		d_printf(6, dev, "Add placeholders into buffer next to "
			 "neighbor information we have (dev address).\n");
		goto out;
	}
	/* Create session, wait for response.  The RX path matches the
	 * incoming D2/F0 frame to this session and invokes the callback,
	 * which completes "completion" (see wlp_session_cb()). */
	session.exp_message = WLP_ASSOC_D2;
	session.cb = wlp_session_cb;
	session.cb_priv = &completion;
	session.neighbor_addr = *dev_addr;
	BUG_ON(wlp->session != NULL);
	wlp->session = &session;
	/* Wait for D2/F0 frame */
	result = wait_for_completion_interruptible_timeout(&completion,
						WLP_PER_MSG_TIMEOUT * HZ);
	if (result == 0) {
		/* Timed out without a response. */
		result = -ETIMEDOUT;
		dev_err(dev, "Timeout while sending D1 to neighbor "
			"%02x:%02x.\n", dev_addr->data[1],
			dev_addr->data[0]);
		goto error_session;
	}
	if (result < 0) {
		/* Wait was interrupted by a signal. */
		dev_err(dev, "Unable to discover/enroll neighbor %02x:%02x.\n",
			dev_addr->data[1], dev_addr->data[0]);
		goto error_session;
	}
	/* Parse message in session->data: it will be either D2 or F0 */
	skb = session.data;
	resp = (void *) skb->data;
	d_printf(6, dev, "Received response to D1 frame. \n");
	d_dump(6, dev, skb->data, skb->len > 72 ? 72 : skb->len);

	if (resp->type == WLP_ASSOC_F0) {
		/* F0 notifies an error: the exchange failed even if the
		 * F0 frame itself parsed cleanly, hence the
		 * unconditional -EINVAL. */
		result = wlp_parse_f0(wlp, skb);
		if (result < 0)
			dev_err(dev, "WLP: Unable to parse F0 from neighbor "
				"%02x:%02x.\n", dev_addr->data[1],
				dev_addr->data[0]);
		result = -EINVAL;
		goto error_resp_parse;
	}
	if (wss == NULL) {
		/* Discovery */
		result = wlp_parse_d2_frame_to_cache(wlp, skb, neighbor);
		if (result < 0) {
			dev_err(dev, "WLP: Unable to parse D2 message from "
				"neighbor %02x:%02x for discovery.\n",
				dev_addr->data[1], dev_addr->data[0]);
			goto error_resp_parse;
		}
	} else {
		/* Enrollment */
		result = wlp_parse_d2_frame_to_enroll(wss, skb, neighbor,
						      wssid);
		if (result < 0) {
			dev_err(dev, "WLP: Unable to parse D2 message from "
				"neighbor %02x:%02x for enrollment.\n",
				dev_addr->data[1], dev_addr->data[0]);
			goto error_resp_parse;
		}
	}
error_resp_parse:
	kfree_skb(skb);
error_session:
	/* Session must be torn down before dropping wlp->mutex since it
	 * lives on this stack frame. */
	wlp->session = NULL;
out:
	mutex_unlock(&wlp->mutex);
	return result;
}
327
/**
 * Enroll into WSS of provided WSSID by using neighbor as registrar
 *
 * @neighbor: the neighbor acting as registrar
 * @wss:      local WSS state to be enrolled
 * @wssid:    WSSID of the WSS to enroll in
 *
 * Performs the D1/D2 exchange in enrollment mode.  On success the D2
 * parsing is expected to leave @wss in WLP_WSS_STATE_PART_ENROLLED,
 * which is promoted here to WLP_WSS_STATE_ENROLLED for an unsecure WSS.
 * Secure enrollment is not implemented yet and resets @wss.
 *
 * &wss->mutex is held
 */
int wlp_enroll_neighbor(struct wlp *wlp, struct wlp_neighbor_e *neighbor,
			struct wlp_wss *wss, struct wlp_uuid *wssid)
{
	int result = 0;
	struct device *dev = &wlp->rc->uwb_dev.dev;
	char buf[WLP_WSS_UUID_STRSIZE];	/* printable wssid for messages */
	struct uwb_dev_addr *dev_addr = &neighbor->uwb_dev->dev_addr;
	wlp_wss_uuid_print(buf, sizeof(buf), wssid);
	d_fnstart(6, dev, "wlp %p, neighbor %p, wss %p, wssid %p (%s)\n",
		  wlp, neighbor, wss, wssid, buf);
	d_printf(6, dev, "Complete me.\n");
	result = wlp_d1d2_exchange(wlp, neighbor, wss, wssid);
	if (result < 0) {
		dev_err(dev, "WLP: D1/D2 message exchange for enrollment "
			"failed. result = %d \n", result);
		goto out;
	}
	/* The D2 parsing must have moved the WSS to partially enrolled;
	 * anything else means enrollment did not take. */
	if (wss->state != WLP_WSS_STATE_PART_ENROLLED) {
		dev_err(dev, "WLP: Unable to enroll into WSS %s using "
			"neighbor %02x:%02x. \n", buf,
			dev_addr->data[1], dev_addr->data[0]);
		result = -EINVAL;
		goto out;
	}
	if (wss->secure_status == WLP_WSS_SECURE) {
		/* Secure handshake not implemented: undo partial state. */
		dev_err(dev, "FIXME: need to complete secure enrollment.\n");
		result = -EINVAL;
		goto error;
	} else {
		wss->state = WLP_WSS_STATE_ENROLLED;
		d_printf(2, dev, "WLP: Success Enrollment into unsecure WSS "
			 "%s using neighbor %02x:%02x. \n", buf,
			 dev_addr->data[1], dev_addr->data[0]);
	}

	d_fnend(6, dev, "wlp %p, neighbor %p, wss %p, wssid %p (%s)\n",
		wlp, neighbor, wss, wssid, buf);
out:
	return result;
error:
	wlp_wss_reset(wss);
	return result;
}
376
377/**
378 * Discover WSS information of neighbor's active WSS
379 */
380static
381int wlp_discover_neighbor(struct wlp *wlp,
382 struct wlp_neighbor_e *neighbor)
383{
384 return wlp_d1d2_exchange(wlp, neighbor, NULL, NULL);
385}
386
387
388/**
389 * Each neighbor in the neighborhood cache is discoverable. Discover it.
390 *
391 * Discovery is done through sending of D1 association frame and parsing
392 * the D2 association frame response. Only wssid from D2 will be included
393 * in neighbor cache, rest is just displayed to user and forgotten.
394 *
395 * The discovery is not done in parallel. This is simple and enables us to
396 * maintain only one association context.
397 *
398 * The discovery of one neighbor does not affect the other, but if the
399 * discovery of a neighbor fails it is removed from the neighborhood cache.
400 */
401static
402int wlp_discover_all_neighbors(struct wlp *wlp)
403{
404 int result = 0;
405 struct device *dev = &wlp->rc->uwb_dev.dev;
406 struct wlp_neighbor_e *neighbor, *next;
407
408 list_for_each_entry_safe(neighbor, next, &wlp->neighbors, node) {
409 result = wlp_discover_neighbor(wlp, neighbor);
410 if (result < 0) {
411 dev_err(dev, "WLP: Unable to discover neighbor "
412 "%02x:%02x, removing from neighborhood. \n",
413 neighbor->uwb_dev->dev_addr.data[1],
414 neighbor->uwb_dev->dev_addr.data[0]);
415 __wlp_neighbor_release(neighbor);
416 }
417 }
418 return result;
419}
420
/* uwb_dev_for_each() callback: add every UWB device as a WLP neighbor. */
static int wlp_add_neighbor_helper(struct device *dev, void *priv)
{
	struct wlp *wlp = priv;

	return wlp_add_neighbor(wlp, to_uwb_dev(dev));
}
428
/**
 * Discover WLP neighborhood
 *
 * Will send D1 association frame to all devices in beacon group that have
 * discoverable bit set in WLP IE. D2 frames will be received, information
 * displayed to user in @buf. Partial information (from D2 association
 * frame) will be cached to assist with future association
 * requests.
 *
 * The discovery of the WLP neighborhood is triggered by the user. This
 * should occur infrequently and we thus free current cache and re-allocate
 * memory if needed.
 *
 * If one neighbor fails during initial discovery (determining if it is a
 * neighbor or not), we fail all - note that interaction with neighbor has
 * not occurred at this point so if a failure occurs we know something went
 * wrong locally. We thus undo everything.
 *
 * Returns 0 on success (a failure during the discovery phase proper is
 * reported via dev_err() but partial results are kept and 0 returned);
 * negative error only when the initial cache rebuild fails.
 */
ssize_t wlp_discover(struct wlp *wlp)
{
	int result = 0;
	struct device *dev = &wlp->rc->uwb_dev.dev;

	d_fnstart(6, dev, "wlp %p \n", wlp);
	/* nbmutex protects the neighborhood cache for the whole rebuild. */
	mutex_lock(&wlp->nbmutex);
	/* Clear current neighborhood cache. */
	__wlp_neighbors_release(wlp);
	/* Determine which devices in neighborhood. Repopulate cache. */
	result = uwb_dev_for_each(wlp->rc, wlp_add_neighbor_helper, wlp);
	if (result < 0) {
		/* May have partial neighbor information, release all. */
		__wlp_neighbors_release(wlp);
		goto error_dev_for_each;
	}
	/* Discover the properties of devices in neighborhood. */
	result = wlp_discover_all_neighbors(wlp);
	/* In case of failure we still print our partial results. */
	if (result < 0) {
		dev_err(dev, "Unable to fully discover neighborhood. \n");
		result = 0;
	}
error_dev_for_each:
	mutex_unlock(&wlp->nbmutex);
	d_fnend(6, dev, "wlp %p \n", wlp);
	return result;
}
475
/**
 * Handle events from UWB stack
 *
 * We handle events conservatively. If a neighbor goes off the air we
 * remove it from the neighborhood. If an association process is in
 * progress this function will block waiting for the nbmutex to become
 * free. The association process will thus be allowed to complete before it
 * is removed.
 *
 * @_wlp:    the struct wlp registered as handler data in wlp_setup()
 * @uwb_dev: device the event refers to
 * @event:   UWB_NOTIF_ONAIR / UWB_NOTIF_OFFAIR
 */
static
void wlp_uwb_notifs_cb(void *_wlp, struct uwb_dev *uwb_dev,
		       enum uwb_notifs event)
{
	struct wlp *wlp = _wlp;
	struct device *dev = &wlp->rc->uwb_dev.dev;
	struct wlp_neighbor_e *neighbor, *next;
	int result;
	switch (event) {
	case UWB_NOTIF_ONAIR:
		d_printf(6, dev, "UWB device %02x:%02x is onair\n",
			 uwb_dev->dev_addr.data[1],
			 uwb_dev->dev_addr.data[0]);
		/* New device on the air: create an EDA cache entry for
		 * it so frames can be mapped to this neighbor. */
		result = wlp_eda_create_node(&wlp->eda,
					     uwb_dev->mac_addr.data,
					     &uwb_dev->dev_addr);
		if (result < 0)
			dev_err(dev, "WLP: Unable to add new neighbor "
				"%02x:%02x to EDA cache.\n",
				uwb_dev->dev_addr.data[1],
				uwb_dev->dev_addr.data[0]);
		break;
	case UWB_NOTIF_OFFAIR:
		d_printf(6, dev, "UWB device %02x:%02x is offair\n",
			 uwb_dev->dev_addr.data[1],
			 uwb_dev->dev_addr.data[0]);
		/* Drop the EDA entry first, then purge any matching
		 * neighborhood-cache entries under nbmutex. */
		wlp_eda_rm_node(&wlp->eda, &uwb_dev->dev_addr);
		mutex_lock(&wlp->nbmutex);
		list_for_each_entry_safe(neighbor, next, &wlp->neighbors,
					 node) {
			if (neighbor->uwb_dev == uwb_dev) {
				d_printf(6, dev, "Removing device from "
					 "neighborhood.\n");
				__wlp_neighbor_release(neighbor);
			}
		}
		mutex_unlock(&wlp->nbmutex);
		break;
	default:
		dev_err(dev, "don't know how to handle event %d from uwb\n",
			event);
	}
}
528
/**
 * wlp_setup - initialize a WLP instance and register with the UWB stack
 * @wlp: WLP instance; the driver must already have set the
 *       fill_device_info, xmit_frame, stop_queue and start_queue
 *       callbacks (enforced by the BUG_ONs below).
 * @rc:  radio controller this WLP instance runs on
 *
 * Initializes the EDA address cache, registers for UWB notifications
 * (neighbors coming on/off the air, see wlp_uwb_notifs_cb()) and
 * registers WLP as a PAL with the radio controller.  If PAL
 * registration fails the notification handler is deregistered again.
 *
 * Returns 0 on success, negative error code otherwise.
 */
int wlp_setup(struct wlp *wlp, struct uwb_rc *rc)
{
	struct device *dev = &rc->uwb_dev.dev;
	int result;

	d_fnstart(6, dev, "wlp %p\n", wlp);
	BUG_ON(wlp->fill_device_info == NULL);
	BUG_ON(wlp->xmit_frame == NULL);
	BUG_ON(wlp->stop_queue == NULL);
	BUG_ON(wlp->start_queue == NULL);
	wlp->rc = rc;
	wlp_eda_init(&wlp->eda);/* Set up address cache */
	wlp->uwb_notifs_handler.cb = wlp_uwb_notifs_cb;
	wlp->uwb_notifs_handler.data = wlp;
	uwb_notifs_register(rc, &wlp->uwb_notifs_handler);

	uwb_pal_init(&wlp->pal);
	result = uwb_pal_register(rc, &wlp->pal);
	if (result < 0)
		uwb_notifs_deregister(wlp->rc, &wlp->uwb_notifs_handler);

	d_fnend(6, dev, "wlp %p, result = %d\n", wlp, result);
	return result;
}
EXPORT_SYMBOL_GPL(wlp_setup);
554
555void wlp_remove(struct wlp *wlp)
556{
557 struct device *dev = &wlp->rc->uwb_dev.dev;
558 d_fnstart(6, dev, "wlp %p\n", wlp);
559 wlp_neighbors_release(wlp);
560 uwb_pal_unregister(wlp->rc, &wlp->pal);
561 uwb_notifs_deregister(wlp->rc, &wlp->uwb_notifs_handler);
562 wlp_eda_release(&wlp->eda);
563 mutex_lock(&wlp->mutex);
564 if (wlp->dev_info != NULL)
565 kfree(wlp->dev_info);
566 mutex_unlock(&wlp->mutex);
567 wlp->rc = NULL;
568 /* We have to use NULL here because this function can be called
569 * when the device disappeared. */
570 d_fnend(6, NULL, "wlp %p\n", wlp);
571}
572EXPORT_SYMBOL_GPL(wlp_remove);
573
/**
 * wlp_reset_all - reset the WLP hardware
 * @wlp: the WLP device to reset.
 *
 * This schedules a full hardware reset of the WLP device. The radio
 * controller and any other PALs will also be reset.
 */
void wlp_reset_all(struct wlp *wlp)
{
	/* Delegate to the UWB core; the reset is scheduled, not
	 * synchronous. */
	uwb_rc_reset_all(wlp->rc);
}
EXPORT_SYMBOL_GPL(wlp_reset_all);
diff --git a/drivers/uwb/wlp/wss-lc.c b/drivers/uwb/wlp/wss-lc.c
new file mode 100644
index 000000000000..96b18c9bd6e9
--- /dev/null
+++ b/drivers/uwb/wlp/wss-lc.c
@@ -0,0 +1,1055 @@
1/*
2 * WiMedia Logical Link Control Protocol (WLP)
3 *
4 * Copyright (C) 2007 Intel Corporation
5 * Reinette Chatre <reinette.chatre@intel.com>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 * 02110-1301, USA.
20 *
21 *
22 * Implementation of the WLP association protocol.
23 *
24 * FIXME: Docs
25 *
26 * A UWB network interface will configure a WSS through wlp_wss_setup() after
27 * the interface has been assigned a MAC address, typically after
28 * "ifconfig" has been called. When the interface goes down it should call
29 * wlp_wss_remove().
30 *
31 * When the WSS is ready for use the user interacts via sysfs to create,
32 * discover, and activate WSS.
33 *
34 * wlp_wss_enroll_activate()
35 *
36 * wlp_wss_create_activate()
37 * wlp_wss_set_wssid_hash()
38 * wlp_wss_comp_wssid_hash()
39 * wlp_wss_sel_bcast_addr()
40 * wlp_wss_sysfs_add()
41 *
42 * Called when no more references to WSS exist:
43 * wlp_wss_release()
44 * wlp_wss_reset()
45 */
46
47#include <linux/etherdevice.h> /* for is_valid_ether_addr */
48#include <linux/skbuff.h>
49#include <linux/wlp.h>
50#define D_LOCAL 5
51#include <linux/uwb/debug.h>
52#include "wlp-internal.h"
53
54
/*
 * Format a 16-octet key as space-separated hex octets into @buf.
 * Returns the number of characters written (scnprintf semantics:
 * always less than @bufsize).
 */
size_t wlp_wss_key_print(char *buf, size_t bufsize, u8 *key)
{
	size_t result;

	result = scnprintf(buf, bufsize,
			   "%02x %02x %02x %02x %02x %02x "
			   "%02x %02x %02x %02x %02x %02x "
			   "%02x %02x %02x %02x",
			   key[0], key[1], key[2], key[3],
			   key[4], key[5], key[6], key[7],
			   key[8], key[9], key[10], key[11],
			   key[12], key[13], key[14], key[15]);
	return result;
}
69
70/**
71 * Compute WSSID hash
72 * WLP Draft 0.99 [7.2.1]
73 *
74 * The WSSID hash for a WSSID is the result of an octet-wise exclusive-OR
75 * of all octets in the WSSID.
76 */
77static
78u8 wlp_wss_comp_wssid_hash(struct wlp_uuid *wssid)
79{
80 return wssid->data[0] ^ wssid->data[1] ^ wssid->data[2]
81 ^ wssid->data[3] ^ wssid->data[4] ^ wssid->data[5]
82 ^ wssid->data[6] ^ wssid->data[7] ^ wssid->data[8]
83 ^ wssid->data[9] ^ wssid->data[10] ^ wssid->data[11]
84 ^ wssid->data[12] ^ wssid->data[13] ^ wssid->data[14]
85 ^ wssid->data[15];
86}
87
/**
 * Select a multicast EUI-48 for the WSS broadcast address.
 * WLP Draft 0.99 [7.2.1]
 *
 * Selected based on the WiMedia Alliance OUI, 00-13-88, within the WLP
 * range, [01-13-88-00-01-00, 01-13-88-00-01-FF] inclusive.
 *
 * This address is currently hardcoded.
 * FIXME?
 */
static
struct uwb_mac_addr wlp_wss_sel_bcast_addr(struct wlp_wss *wss)
{
	/* @wss is unused while the address is hardcoded; presumably kept
	 * in the signature for a future per-WSS selection. */
	struct uwb_mac_addr bcast = {
		.data = { 0x01, 0x13, 0x88, 0x00, 0x01, 0x00 }
	};
	return bcast;
}
106
107/**
108 * Clear the contents of the WSS structure - all except kobj, mutex, virtual
109 *
110 * We do not want to reinitialize - the internal kobj should not change as
111 * it still points to the parent received during setup. The mutex should
112 * remain also. We thus just reset values individually.
113 * The virutal address assigned to WSS will remain the same for the
114 * lifetime of the WSS. We only reset the fields that can change during its
115 * lifetime.
116 */
117void wlp_wss_reset(struct wlp_wss *wss)
118{
119 struct wlp *wlp = container_of(wss, struct wlp, wss);
120 struct device *dev = &wlp->rc->uwb_dev.dev;
121 d_fnstart(5, dev, "wss (%p) \n", wss);
122 memset(&wss->wssid, 0, sizeof(wss->wssid));
123 wss->hash = 0;
124 memset(&wss->name[0], 0, sizeof(wss->name));
125 memset(&wss->bcast, 0, sizeof(wss->bcast));
126 wss->secure_status = WLP_WSS_UNSECURE;
127 memset(&wss->master_key[0], 0, sizeof(wss->master_key));
128 wss->tag = 0;
129 wss->state = WLP_WSS_STATE_NONE;
130 d_fnend(5, dev, "wss (%p) \n", wss);
131}
132
/**
 * Create sysfs infrastructure for WSS
 *
 * The WSS is configured to have the interface as parent (see wlp_wss_setup())
 * a new sysfs directory that includes wssid as its name is created in the
 * interface's sysfs directory. The group of files interacting with WSS are
 * created also.
 */
static
int wlp_wss_sysfs_add(struct wlp_wss *wss, char *wssid_str)
{
	struct wlp *wlp = container_of(wss, struct wlp, wss);
	struct device *dev = &wlp->rc->uwb_dev.dev;
	int result;

	d_fnstart(5, dev, "wss (%p), wssid: %s\n", wss, wssid_str);
	result = kobject_set_name(&wss->kobj, "wss-%s", wssid_str);
	if (result < 0)
		return result;
	wss->kobj.ktype = &wss_ktype;
	/* NOTE(review): kobject_init_and_add() is given the literal name
	 * "wlp", which replaces the "wss-%s" name set just above --
	 * confirm which name is meant to appear in sysfs. */
	result = kobject_init_and_add(&wss->kobj,
			&wss_ktype, wss->kobj.parent, "wlp");
	if (result < 0) {
		dev_err(dev, "WLP: Cannot register WSS kobject.\n");
		goto error_kobject_register;
	}
	result = sysfs_create_group(&wss->kobj, &wss_attr_group);
	if (result < 0) {
		dev_err(dev, "WLP: Cannot register WSS attributes: %d\n",
			result);
		goto error_sysfs_create_group;
	}
	d_fnend(5, dev, "Completed. result = %d \n", result);
	return 0;
error_sysfs_create_group:

	kobject_put(&wss->kobj); /* will free name if needed */
	return result;
error_kobject_register:
	/* NOTE(review): the kobject API expects kobject_put() after a
	 * failed kobject_init_and_add() rather than manually freeing the
	 * name -- verify against Documentation/kobject.txt. */
	kfree(wss->kobj.name);
	wss->kobj.name = NULL;
	wss->kobj.ktype = NULL;
	return result;
}
177
178
/**
 * Release WSS
 *
 * No more references exist to this WSS. We should undo everything that was
 * done in wlp_wss_create_activate() except removing the group. The group
 * is not removed because an object can be unregistered before the group is
 * created. We also undo any additional operations on the WSS after this
 * (addition of members).
 *
 * If memory was allocated for the kobject's name then it will
 * be freed by the kobject system during this time.
 *
 * The EDA cache is removed and reinitialized when the WSS is removed. We
 * thus lose knowledge of members of this WSS at that time and need not do
 * it here.
 */
void wlp_wss_release(struct kobject *kobj)
{
	struct wlp_wss *wss = container_of(kobj, struct wlp_wss, kobj);

	/* Reset rather than free: the wss is embedded in struct wlp. */
	wlp_wss_reset(wss);
}
201
202/**
203 * Enroll into a WSS using provided neighbor as registrar
204 *
205 * First search the neighborhood information to learn which neighbor is
206 * referred to, next proceed with enrollment.
207 *
208 * &wss->mutex is held
209 */
210static
211int wlp_wss_enroll_target(struct wlp_wss *wss, struct wlp_uuid *wssid,
212 struct uwb_dev_addr *dest)
213{
214 struct wlp *wlp = container_of(wss, struct wlp, wss);
215 struct device *dev = &wlp->rc->uwb_dev.dev;
216 struct wlp_neighbor_e *neighbor;
217 char buf[WLP_WSS_UUID_STRSIZE];
218 int result = -ENXIO;
219 struct uwb_dev_addr *dev_addr;
220
221 wlp_wss_uuid_print(buf, sizeof(buf), wssid);
222 d_fnstart(5, dev, "wss %p, wssid %s, registrar %02x:%02x \n",
223 wss, buf, dest->data[1], dest->data[0]);
224 mutex_lock(&wlp->nbmutex);
225 list_for_each_entry(neighbor, &wlp->neighbors, node) {
226 dev_addr = &neighbor->uwb_dev->dev_addr;
227 if (!memcmp(dest, dev_addr, sizeof(*dest))) {
228 d_printf(5, dev, "Neighbor %02x:%02x is valid, "
229 "enrolling. \n",
230 dev_addr->data[1], dev_addr->data[0]);
231 result = wlp_enroll_neighbor(wlp, neighbor, wss,
232 wssid);
233 break;
234 }
235 }
236 if (result == -ENXIO)
237 dev_err(dev, "WLP: Cannot find neighbor %02x:%02x. \n",
238 dest->data[1], dest->data[0]);
239 mutex_unlock(&wlp->nbmutex);
240 d_fnend(5, dev, "wss %p, wssid %s, registrar %02x:%02x, result %d \n",
241 wss, buf, dest->data[1], dest->data[0], result);
242 return result;
243}
244
245/**
246 * Enroll into a WSS previously discovered
247 *
248 * User provides WSSID of WSS, search for neighbor that has this WSS
249 * activated and attempt to enroll.
250 *
251 * &wss->mutex is held
252 */
253static
254int wlp_wss_enroll_discovered(struct wlp_wss *wss, struct wlp_uuid *wssid)
255{
256 struct wlp *wlp = container_of(wss, struct wlp, wss);
257 struct device *dev = &wlp->rc->uwb_dev.dev;
258 struct wlp_neighbor_e *neighbor;
259 struct wlp_wssid_e *wssid_e;
260 char buf[WLP_WSS_UUID_STRSIZE];
261 int result = -ENXIO;
262
263 wlp_wss_uuid_print(buf, sizeof(buf), wssid);
264 d_fnstart(5, dev, "wss %p, wssid %s \n", wss, buf);
265 mutex_lock(&wlp->nbmutex);
266 list_for_each_entry(neighbor, &wlp->neighbors, node) {
267 list_for_each_entry(wssid_e, &neighbor->wssid, node) {
268 if (!memcmp(wssid, &wssid_e->wssid, sizeof(*wssid))) {
269 d_printf(5, dev, "Found WSSID %s in neighbor "
270 "%02x:%02x cache. \n", buf,
271 neighbor->uwb_dev->dev_addr.data[1],
272 neighbor->uwb_dev->dev_addr.data[0]);
273 result = wlp_enroll_neighbor(wlp, neighbor,
274 wss, wssid);
275 if (result == 0) /* enrollment success */
276 goto out;
277 break;
278 }
279 }
280 }
281out:
282 if (result == -ENXIO)
283 dev_err(dev, "WLP: Cannot find WSSID %s in cache. \n", buf);
284 mutex_unlock(&wlp->nbmutex);
285 d_fnend(5, dev, "wss %p, wssid %s, result %d \n", wss, buf, result);
286 return result;
287}
288
289/**
290 * Enroll into WSS with provided WSSID, registrar may be provided
291 *
292 * @wss: out WSS that will be enrolled
293 * @wssid: wssid of neighboring WSS that we want to enroll in
294 * @devaddr: registrar can be specified, will be broadcast (ff:ff) if any
295 * neighbor can be used as registrar.
296 *
297 * &wss->mutex is held
298 */
299static
300int wlp_wss_enroll(struct wlp_wss *wss, struct wlp_uuid *wssid,
301 struct uwb_dev_addr *devaddr)
302{
303 int result;
304 struct wlp *wlp = container_of(wss, struct wlp, wss);
305 struct device *dev = &wlp->rc->uwb_dev.dev;
306 char buf[WLP_WSS_UUID_STRSIZE];
307 struct uwb_dev_addr bcast = {.data = {0xff, 0xff} };
308
309 wlp_wss_uuid_print(buf, sizeof(buf), wssid);
310 if (wss->state != WLP_WSS_STATE_NONE) {
311 dev_err(dev, "WLP: Already enrolled in WSS %s.\n", buf);
312 result = -EEXIST;
313 goto error;
314 }
315 if (!memcmp(&bcast, devaddr, sizeof(bcast))) {
316 d_printf(5, dev, "Request to enroll in discovered WSS "
317 "with WSSID %s \n", buf);
318 result = wlp_wss_enroll_discovered(wss, wssid);
319 } else {
320 d_printf(5, dev, "Request to enroll in WSSID %s with "
321 "registrar %02x:%02x\n", buf, devaddr->data[1],
322 devaddr->data[0]);
323 result = wlp_wss_enroll_target(wss, wssid, devaddr);
324 }
325 if (result < 0) {
326 dev_err(dev, "WLP: Unable to enroll into WSS %s, result %d \n",
327 buf, result);
328 goto error;
329 }
330 d_printf(2, dev, "Successfully enrolled into WSS %s \n", buf);
331 result = wlp_wss_sysfs_add(wss, buf);
332 if (result < 0) {
333 dev_err(dev, "WLP: Unable to set up sysfs for WSS kobject.\n");
334 wlp_wss_reset(wss);
335 }
336error:
337 return result;
338
339}
340
341/**
342 * Activate given WSS
343 *
344 * Prior to activation a WSS must be enrolled. To activate a WSS a device
345 * includes the WSS hash in the WLP IE in its beacon in each superframe.
346 * WLP 0.99 [7.2.5].
347 *
348 * The WSS tag is also computed at this time. We only support one activated
349 * WSS so we can use the hash as a tag - there will never be a conflict.
350 *
351 * We currently only support one activated WSS so only one WSS hash is
352 * included in the WLP IE.
353 */
354static
355int wlp_wss_activate(struct wlp_wss *wss)
356{
357 struct wlp *wlp = container_of(wss, struct wlp, wss);
358 struct device *dev = &wlp->rc->uwb_dev.dev;
359 struct uwb_rc *uwb_rc = wlp->rc;
360 int result;
361 struct {
362 struct wlp_ie wlp_ie;
363 u8 hash; /* only include one hash */
364 } ie_data;
365
366 d_fnstart(5, dev, "Activating WSS %p. \n", wss);
367 BUG_ON(wss->state != WLP_WSS_STATE_ENROLLED);
368 wss->hash = wlp_wss_comp_wssid_hash(&wss->wssid);
369 wss->tag = wss->hash;
370 memset(&ie_data, 0, sizeof(ie_data));
371 ie_data.wlp_ie.hdr.element_id = UWB_IE_WLP;
372 ie_data.wlp_ie.hdr.length = sizeof(ie_data) - sizeof(struct uwb_ie_hdr);
373 wlp_ie_set_hash_length(&ie_data.wlp_ie, sizeof(ie_data.hash));
374 ie_data.hash = wss->hash;
375 result = uwb_rc_ie_add(uwb_rc, &ie_data.wlp_ie.hdr,
376 sizeof(ie_data));
377 if (result < 0) {
378 dev_err(dev, "WLP: Unable to add WLP IE to beacon. "
379 "result = %d.\n", result);
380 goto error_wlp_ie;
381 }
382 wss->state = WLP_WSS_STATE_ACTIVE;
383 result = 0;
384error_wlp_ie:
385 d_fnend(5, dev, "Activating WSS %p, result = %d \n", wss, result);
386 return result;
387}
388
389/**
390 * Enroll in and activate WSS identified by provided WSSID
391 *
392 * The neighborhood cache should contain a list of all neighbors and the
393 * WSS they have activated. Based on that cache we search which neighbor we
394 * can perform the association process with. The user also has option to
395 * specify which neighbor it prefers as registrar.
396 * Successful enrollment is followed by activation.
397 * Successful activation will create the sysfs directory containing
398 * specific information regarding this WSS.
399 */
400int wlp_wss_enroll_activate(struct wlp_wss *wss, struct wlp_uuid *wssid,
401 struct uwb_dev_addr *devaddr)
402{
403 struct wlp *wlp = container_of(wss, struct wlp, wss);
404 struct device *dev = &wlp->rc->uwb_dev.dev;
405 int result = 0;
406 char buf[WLP_WSS_UUID_STRSIZE];
407
408 d_fnstart(5, dev, "Enrollment and activation requested. \n");
409 mutex_lock(&wss->mutex);
410 result = wlp_wss_enroll(wss, wssid, devaddr);
411 if (result < 0) {
412 wlp_wss_uuid_print(buf, sizeof(buf), &wss->wssid);
413 dev_err(dev, "WLP: Enrollment into WSS %s failed.\n", buf);
414 goto error_enroll;
415 }
416 result = wlp_wss_activate(wss);
417 if (result < 0) {
418 dev_err(dev, "WLP: Unable to activate WSS. Undoing enrollment "
419 "result = %d \n", result);
420 /* Undo enrollment */
421 wlp_wss_reset(wss);
422 goto error_activate;
423 }
424error_activate:
425error_enroll:
426 mutex_unlock(&wss->mutex);
427 d_fnend(5, dev, "Completed. result = %d \n", result);
428 return result;
429}
430
431/**
432 * Create, enroll, and activate a new WSS
433 *
434 * @wssid: new wssid provided by user
435 * @name: WSS name requested by used.
436 * @sec_status: security status requested by user
437 *
438 * A user requested the creation of a new WSS. All operations are done
439 * locally. The new WSS will be stored locally, the hash will be included
440 * in the WLP IE, and the sysfs infrastructure for this WSS will be
441 * created.
442 */
443int wlp_wss_create_activate(struct wlp_wss *wss, struct wlp_uuid *wssid,
444 char *name, unsigned sec_status, unsigned accept)
445{
446 struct wlp *wlp = container_of(wss, struct wlp, wss);
447 struct device *dev = &wlp->rc->uwb_dev.dev;
448 int result = 0;
449 char buf[WLP_WSS_UUID_STRSIZE];
450 d_fnstart(5, dev, "Request to create new WSS.\n");
451 result = wlp_wss_uuid_print(buf, sizeof(buf), wssid);
452 d_printf(5, dev, "Request to create WSS: WSSID=%s, name=%s, "
453 "sec_status=%u, accepting enrollment=%u \n",
454 buf, name, sec_status, accept);
455 if (!mutex_trylock(&wss->mutex)) {
456 dev_err(dev, "WLP: WLP association session in progress.\n");
457 return -EBUSY;
458 }
459 if (wss->state != WLP_WSS_STATE_NONE) {
460 dev_err(dev, "WLP: WSS already exists. Not creating new.\n");
461 result = -EEXIST;
462 goto out;
463 }
464 if (wss->kobj.parent == NULL) {
465 dev_err(dev, "WLP: WSS parent not ready. Is network interface "
466 "up?\n");
467 result = -ENXIO;
468 goto out;
469 }
470 if (sec_status == WLP_WSS_SECURE) {
471 dev_err(dev, "WLP: FIXME Creation of secure WSS not "
472 "supported yet.\n");
473 result = -EINVAL;
474 goto out;
475 }
476 wss->wssid = *wssid;
477 memcpy(wss->name, name, sizeof(wss->name));
478 wss->bcast = wlp_wss_sel_bcast_addr(wss);
479 wss->secure_status = sec_status;
480 wss->accept_enroll = accept;
481 /*wss->virtual_addr is initialized in call to wlp_wss_setup*/
482 /* sysfs infrastructure */
483 result = wlp_wss_sysfs_add(wss, buf);
484 if (result < 0) {
485 dev_err(dev, "Cannot set up sysfs for WSS kobject.\n");
486 wlp_wss_reset(wss);
487 goto out;
488 } else
489 result = 0;
490 wss->state = WLP_WSS_STATE_ENROLLED;
491 result = wlp_wss_activate(wss);
492 if (result < 0) {
493 dev_err(dev, "WLP: Unable to activate WSS. Undoing "
494 "enrollment\n");
495 wlp_wss_reset(wss);
496 goto out;
497 }
498 result = 0;
499out:
500 mutex_unlock(&wss->mutex);
501 d_fnend(5, dev, "Completed. result = %d \n", result);
502 return result;
503}
504
505/**
506 * Determine if neighbor has WSS activated
507 *
508 * @returns: 1 if neighbor has WSS activated, zero otherwise
509 *
510 * This can be done in two ways:
511 * - send a C1 frame, parse C2/F0 response
512 * - examine the WLP IE sent by the neighbor
513 *
514 * The WLP IE is not fully supported in hardware so we use the C1/C2 frame
515 * exchange to determine if a WSS is activated. Using the WLP IE should be
516 * faster and should be used when it becomes possible.
517 */
518int wlp_wss_is_active(struct wlp *wlp, struct wlp_wss *wss,
519 struct uwb_dev_addr *dev_addr)
520{
521 int result = 0;
522 struct device *dev = &wlp->rc->uwb_dev.dev;
523 char buf[WLP_WSS_UUID_STRSIZE];
524 DECLARE_COMPLETION_ONSTACK(completion);
525 struct wlp_session session;
526 struct sk_buff *skb;
527 struct wlp_frame_assoc *resp;
528 struct wlp_uuid wssid;
529
530 wlp_wss_uuid_print(buf, sizeof(buf), &wss->wssid);
531 d_fnstart(5, dev, "wlp %p, wss %p (wssid %s), neighbor %02x:%02x \n",
532 wlp, wss, buf, dev_addr->data[1], dev_addr->data[0]);
533 mutex_lock(&wlp->mutex);
534 /* Send C1 association frame */
535 result = wlp_send_assoc_frame(wlp, wss, dev_addr, WLP_ASSOC_C1);
536 if (result < 0) {
537 dev_err(dev, "Unable to send C1 frame to neighbor "
538 "%02x:%02x (%d)\n", dev_addr->data[1],
539 dev_addr->data[0], result);
540 result = 0;
541 goto out;
542 }
543 /* Create session, wait for response */
544 session.exp_message = WLP_ASSOC_C2;
545 session.cb = wlp_session_cb;
546 session.cb_priv = &completion;
547 session.neighbor_addr = *dev_addr;
548 BUG_ON(wlp->session != NULL);
549 wlp->session = &session;
550 /* Wait for C2/F0 frame */
551 result = wait_for_completion_interruptible_timeout(&completion,
552 WLP_PER_MSG_TIMEOUT * HZ);
553 if (result == 0) {
554 dev_err(dev, "Timeout while sending C1 to neighbor "
555 "%02x:%02x.\n", dev_addr->data[1],
556 dev_addr->data[0]);
557 goto out;
558 }
559 if (result < 0) {
560 dev_err(dev, "Unable to send C1 to neighbor %02x:%02x.\n",
561 dev_addr->data[1], dev_addr->data[0]);
562 result = 0;
563 goto out;
564 }
565 /* Parse message in session->data: it will be either C2 or F0 */
566 skb = session.data;
567 resp = (void *) skb->data;
568 d_printf(5, dev, "Received response to C1 frame. \n");
569 d_dump(5, dev, skb->data, skb->len > 72 ? 72 : skb->len);
570 if (resp->type == WLP_ASSOC_F0) {
571 result = wlp_parse_f0(wlp, skb);
572 if (result < 0)
573 dev_err(dev, "WLP: unable to parse incoming F0 "
574 "frame from neighbor %02x:%02x.\n",
575 dev_addr->data[1], dev_addr->data[0]);
576 result = 0;
577 goto error_resp_parse;
578 }
579 /* WLP version and message type fields have already been parsed */
580 result = wlp_get_wssid(wlp, (void *)resp + sizeof(*resp), &wssid,
581 skb->len - sizeof(*resp));
582 if (result < 0) {
583 dev_err(dev, "WLP: unable to obtain WSSID from C2 frame.\n");
584 result = 0;
585 goto error_resp_parse;
586 }
587 if (!memcmp(&wssid, &wss->wssid, sizeof(wssid))) {
588 d_printf(5, dev, "WSSID in C2 frame matches local "
589 "active WSS.\n");
590 result = 1;
591 } else {
592 dev_err(dev, "WLP: Received a C2 frame without matching "
593 "WSSID.\n");
594 result = 0;
595 }
596error_resp_parse:
597 kfree_skb(skb);
598out:
599 wlp->session = NULL;
600 mutex_unlock(&wlp->mutex);
601 d_fnend(5, dev, "wlp %p, wss %p (wssid %s), neighbor %02x:%02x \n",
602 wlp, wss, buf, dev_addr->data[1], dev_addr->data[0]);
603 return result;
604}
605
606/**
607 * Activate connection with neighbor by updating EDA cache
608 *
609 * @wss: local WSS to which neighbor wants to connect
610 * @dev_addr: neighbor's address
611 * @wssid: neighbor's WSSID - must be same as our WSS's WSSID
612 * @tag: neighbor's WSS tag used to identify frames transmitted by it
613 * @virt_addr: neighbor's virtual EUI-48
614 */
615static
616int wlp_wss_activate_connection(struct wlp *wlp, struct wlp_wss *wss,
617 struct uwb_dev_addr *dev_addr,
618 struct wlp_uuid *wssid, u8 *tag,
619 struct uwb_mac_addr *virt_addr)
620{
621 struct device *dev = &wlp->rc->uwb_dev.dev;
622 int result = 0;
623 char buf[WLP_WSS_UUID_STRSIZE];
624 wlp_wss_uuid_print(buf, sizeof(buf), wssid);
625 d_fnstart(5, dev, "wlp %p, wss %p, wssid %s, tag %u, virtual "
626 "%02x:%02x:%02x:%02x:%02x:%02x \n", wlp, wss, buf, *tag,
627 virt_addr->data[0], virt_addr->data[1], virt_addr->data[2],
628 virt_addr->data[3], virt_addr->data[4], virt_addr->data[5]);
629
630 if (!memcmp(wssid, &wss->wssid, sizeof(*wssid))) {
631 d_printf(5, dev, "WSSID from neighbor frame matches local "
632 "active WSS.\n");
633 /* Update EDA cache */
634 result = wlp_eda_update_node(&wlp->eda, dev_addr, wss,
635 (void *) virt_addr->data, *tag,
636 WLP_WSS_CONNECTED);
637 if (result < 0)
638 dev_err(dev, "WLP: Unable to update EDA cache "
639 "with new connected neighbor information.\n");
640 } else {
641 dev_err(dev, "WLP: Neighbor does not have matching "
642 "WSSID.\n");
643 result = -EINVAL;
644 }
645
646 d_fnend(5, dev, "wlp %p, wss %p, wssid %s, tag %u, virtual "
647 "%02x:%02x:%02x:%02x:%02x:%02x, result = %d \n",
648 wlp, wss, buf, *tag,
649 virt_addr->data[0], virt_addr->data[1], virt_addr->data[2],
650 virt_addr->data[3], virt_addr->data[4], virt_addr->data[5],
651 result);
652
653 return result;
654}
655
656/**
657 * Connect to WSS neighbor
658 *
659 * Use C3/C4 exchange to determine if neighbor has WSS activated and
660 * retrieve the WSS tag and virtual EUI-48 of the neighbor.
661 */
662static
663int wlp_wss_connect_neighbor(struct wlp *wlp, struct wlp_wss *wss,
664 struct uwb_dev_addr *dev_addr)
665{
666 int result;
667 struct device *dev = &wlp->rc->uwb_dev.dev;
668 char buf[WLP_WSS_UUID_STRSIZE];
669 struct wlp_uuid wssid;
670 u8 tag;
671 struct uwb_mac_addr virt_addr;
672 DECLARE_COMPLETION_ONSTACK(completion);
673 struct wlp_session session;
674 struct wlp_frame_assoc *resp;
675 struct sk_buff *skb;
676
677 wlp_wss_uuid_print(buf, sizeof(buf), &wss->wssid);
678 d_fnstart(5, dev, "wlp %p, wss %p (wssid %s), neighbor %02x:%02x \n",
679 wlp, wss, buf, dev_addr->data[1], dev_addr->data[0]);
680 mutex_lock(&wlp->mutex);
681 /* Send C3 association frame */
682 result = wlp_send_assoc_frame(wlp, wss, dev_addr, WLP_ASSOC_C3);
683 if (result < 0) {
684 dev_err(dev, "Unable to send C3 frame to neighbor "
685 "%02x:%02x (%d)\n", dev_addr->data[1],
686 dev_addr->data[0], result);
687 goto out;
688 }
689 /* Create session, wait for response */
690 session.exp_message = WLP_ASSOC_C4;
691 session.cb = wlp_session_cb;
692 session.cb_priv = &completion;
693 session.neighbor_addr = *dev_addr;
694 BUG_ON(wlp->session != NULL);
695 wlp->session = &session;
696 /* Wait for C4/F0 frame */
697 result = wait_for_completion_interruptible_timeout(&completion,
698 WLP_PER_MSG_TIMEOUT * HZ);
699 if (result == 0) {
700 dev_err(dev, "Timeout while sending C3 to neighbor "
701 "%02x:%02x.\n", dev_addr->data[1],
702 dev_addr->data[0]);
703 result = -ETIMEDOUT;
704 goto out;
705 }
706 if (result < 0) {
707 dev_err(dev, "Unable to send C3 to neighbor %02x:%02x.\n",
708 dev_addr->data[1], dev_addr->data[0]);
709 goto out;
710 }
711 /* Parse message in session->data: it will be either C4 or F0 */
712 skb = session.data;
713 resp = (void *) skb->data;
714 d_printf(5, dev, "Received response to C3 frame. \n");
715 d_dump(5, dev, skb->data, skb->len > 72 ? 72 : skb->len);
716 if (resp->type == WLP_ASSOC_F0) {
717 result = wlp_parse_f0(wlp, skb);
718 if (result < 0)
719 dev_err(dev, "WLP: unable to parse incoming F0 "
720 "frame from neighbor %02x:%02x.\n",
721 dev_addr->data[1], dev_addr->data[0]);
722 result = -EINVAL;
723 goto error_resp_parse;
724 }
725 result = wlp_parse_c3c4_frame(wlp, skb, &wssid, &tag, &virt_addr);
726 if (result < 0) {
727 dev_err(dev, "WLP: Unable to parse C4 frame from neighbor.\n");
728 goto error_resp_parse;
729 }
730 result = wlp_wss_activate_connection(wlp, wss, dev_addr, &wssid, &tag,
731 &virt_addr);
732 if (result < 0) {
733 dev_err(dev, "WLP: Unable to activate connection to "
734 "neighbor %02x:%02x.\n", dev_addr->data[1],
735 dev_addr->data[0]);
736 goto error_resp_parse;
737 }
738error_resp_parse:
739 kfree_skb(skb);
740out:
741 /* Record that we unsuccessfully tried to connect to this neighbor */
742 if (result < 0)
743 wlp_eda_update_node_state(&wlp->eda, dev_addr,
744 WLP_WSS_CONNECT_FAILED);
745 wlp->session = NULL;
746 mutex_unlock(&wlp->mutex);
747 d_fnend(5, dev, "wlp %p, wss %p (wssid %s), neighbor %02x:%02x \n",
748 wlp, wss, buf, dev_addr->data[1], dev_addr->data[0]);
749 return result;
750}
751
752/**
753 * Connect to neighbor with common WSS, send pending frame
754 *
755 * This function is scheduled when a frame is destined to a neighbor with
756 * which we do not have a connection. A copy of the EDA cache entry is
757 * provided - not the actual cache entry (because it is protected by a
758 * spinlock).
759 *
760 * First determine if neighbor has the same WSS activated, connect if it
761 * does. The C3/C4 exchange is dual purpose to determine if neighbor has
762 * WSS activated and proceed with the connection.
763 *
764 * The frame that triggered the connection setup is sent after connection
765 * setup.
766 *
767 * network queue is stopped - we need to restart when done
768 *
769 */
770static
771void wlp_wss_connect_send(struct work_struct *ws)
772{
773 struct wlp_assoc_conn_ctx *conn_ctx = container_of(ws,
774 struct wlp_assoc_conn_ctx,
775 ws);
776 struct wlp *wlp = conn_ctx->wlp;
777 struct sk_buff *skb = conn_ctx->skb;
778 struct wlp_eda_node *eda_entry = &conn_ctx->eda_entry;
779 struct uwb_dev_addr *dev_addr = &eda_entry->dev_addr;
780 struct wlp_wss *wss = &wlp->wss;
781 int result;
782 struct device *dev = &wlp->rc->uwb_dev.dev;
783 char buf[WLP_WSS_UUID_STRSIZE];
784
785 mutex_lock(&wss->mutex);
786 wlp_wss_uuid_print(buf, sizeof(buf), &wss->wssid);
787 d_fnstart(5, dev, "wlp %p, wss %p (wssid %s), neighbor %02x:%02x \n",
788 wlp, wss, buf, dev_addr->data[1], dev_addr->data[0]);
789 if (wss->state < WLP_WSS_STATE_ACTIVE) {
790 if (printk_ratelimit())
791 dev_err(dev, "WLP: Attempting to connect with "
792 "WSS that is not active or connected.\n");
793 dev_kfree_skb(skb);
794 goto out;
795 }
796 /* Establish connection - send C3 rcv C4 */
797 result = wlp_wss_connect_neighbor(wlp, wss, dev_addr);
798 if (result < 0) {
799 if (printk_ratelimit())
800 dev_err(dev, "WLP: Unable to establish connection "
801 "with neighbor %02x:%02x.\n",
802 dev_addr->data[1], dev_addr->data[0]);
803 dev_kfree_skb(skb);
804 goto out;
805 }
806 /* EDA entry changed, update the local copy being used */
807 result = wlp_copy_eda_node(&wlp->eda, dev_addr, eda_entry);
808 if (result < 0) {
809 if (printk_ratelimit())
810 dev_err(dev, "WLP: Cannot find EDA entry for "
811 "neighbor %02x:%02x \n",
812 dev_addr->data[1], dev_addr->data[0]);
813 }
814 result = wlp_wss_prep_hdr(wlp, eda_entry, skb);
815 if (result < 0) {
816 if (printk_ratelimit())
817 dev_err(dev, "WLP: Unable to prepare frame header for "
818 "transmission (neighbor %02x:%02x). \n",
819 dev_addr->data[1], dev_addr->data[0]);
820 dev_kfree_skb(skb);
821 goto out;
822 }
823 BUG_ON(wlp->xmit_frame == NULL);
824 result = wlp->xmit_frame(wlp, skb, dev_addr);
825 if (result < 0) {
826 if (printk_ratelimit())
827 dev_err(dev, "WLP: Unable to transmit frame: %d\n",
828 result);
829 if (result == -ENXIO)
830 dev_err(dev, "WLP: Is network interface up? \n");
831 /* We could try again ... */
832 dev_kfree_skb(skb);/*we need to free if tx fails */
833 }
834out:
835 kfree(conn_ctx);
836 BUG_ON(wlp->start_queue == NULL);
837 wlp->start_queue(wlp);
838 mutex_unlock(&wss->mutex);
839 d_fnend(5, dev, "wlp %p, wss %p (wssid %s)\n", wlp, wss, buf);
840}
841
842/**
843 * Add WLP header to outgoing skb
844 *
845 * @eda_entry: pointer to neighbor's entry in the EDA cache
846 * @_skb: skb containing data destined to the neighbor
847 */
848int wlp_wss_prep_hdr(struct wlp *wlp, struct wlp_eda_node *eda_entry,
849 void *_skb)
850{
851 struct device *dev = &wlp->rc->uwb_dev.dev;
852 int result = 0;
853 unsigned char *eth_addr = eda_entry->eth_addr;
854 struct uwb_dev_addr *dev_addr = &eda_entry->dev_addr;
855 struct sk_buff *skb = _skb;
856 struct wlp_frame_std_abbrv_hdr *std_hdr;
857
858 d_fnstart(6, dev, "wlp %p \n", wlp);
859 if (eda_entry->state == WLP_WSS_CONNECTED) {
860 /* Add WLP header */
861 BUG_ON(skb_headroom(skb) < sizeof(*std_hdr));
862 std_hdr = (void *) __skb_push(skb, sizeof(*std_hdr));
863 std_hdr->hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID);
864 std_hdr->hdr.type = WLP_FRAME_STANDARD;
865 std_hdr->tag = eda_entry->wss->tag;
866 } else {
867 if (printk_ratelimit())
868 dev_err(dev, "WLP: Destination neighbor (Ethernet: "
869 "%02x:%02x:%02x:%02x:%02x:%02x, Dev: "
870 "%02x:%02x) is not connected. \n", eth_addr[0],
871 eth_addr[1], eth_addr[2], eth_addr[3],
872 eth_addr[4], eth_addr[5], dev_addr->data[1],
873 dev_addr->data[0]);
874 result = -EINVAL;
875 }
876 d_fnend(6, dev, "wlp %p \n", wlp);
877 return result;
878}
879
880
881/**
882 * Prepare skb for neighbor: connect if not already and prep WLP header
883 *
884 * This function is called in interrupt context, but it needs to sleep. We
885 * temporarily stop the net queue to establish the WLP connection.
886 * Setup of the WLP connection and restart of queue is scheduled
887 * on the default work queue.
888 *
889 * run with eda->lock held (spinlock)
890 */
891int wlp_wss_connect_prep(struct wlp *wlp, struct wlp_eda_node *eda_entry,
892 void *_skb)
893{
894 int result = 0;
895 struct device *dev = &wlp->rc->uwb_dev.dev;
896 struct uwb_dev_addr *dev_addr = &eda_entry->dev_addr;
897 unsigned char *eth_addr = eda_entry->eth_addr;
898 struct sk_buff *skb = _skb;
899 struct wlp_assoc_conn_ctx *conn_ctx;
900
901 d_fnstart(5, dev, "wlp %p\n", wlp);
902 d_printf(5, dev, "To neighbor %02x:%02x with eth "
903 "%02x:%02x:%02x:%02x:%02x:%02x\n", dev_addr->data[1],
904 dev_addr->data[0], eth_addr[0], eth_addr[1], eth_addr[2],
905 eth_addr[3], eth_addr[4], eth_addr[5]);
906 if (eda_entry->state == WLP_WSS_UNCONNECTED) {
907 /* We don't want any more packets while we set up connection */
908 BUG_ON(wlp->stop_queue == NULL);
909 wlp->stop_queue(wlp);
910 conn_ctx = kmalloc(sizeof(*conn_ctx), GFP_ATOMIC);
911 if (conn_ctx == NULL) {
912 if (printk_ratelimit())
913 dev_err(dev, "WLP: Unable to allocate memory "
914 "for connection handling.\n");
915 result = -ENOMEM;
916 goto out;
917 }
918 conn_ctx->wlp = wlp;
919 conn_ctx->skb = skb;
920 conn_ctx->eda_entry = *eda_entry;
921 INIT_WORK(&conn_ctx->ws, wlp_wss_connect_send);
922 schedule_work(&conn_ctx->ws);
923 result = 1;
924 } else if (eda_entry->state == WLP_WSS_CONNECT_FAILED) {
925 /* Previous connection attempts failed, don't retry - see
926 * conditions for connection in WLP 0.99 [7.6.2] */
927 if (printk_ratelimit())
928 dev_err(dev, "Could not connect to neighbor "
929 "previously. Not retrying. \n");
930 result = -ENONET;
931 goto out;
932 } else { /* eda_entry->state == WLP_WSS_CONNECTED */
933 d_printf(5, dev, "Neighbor is connected, preparing frame.\n");
934 result = wlp_wss_prep_hdr(wlp, eda_entry, skb);
935 }
936out:
937 d_fnend(5, dev, "wlp %p, result = %d \n", wlp, result);
938 return result;
939}
940
941/**
942 * Emulate broadcast: copy skb, send copy to neighbor (connect if not already)
943 *
944 * We need to copy skbs in the case where we emulate broadcast through
945 * unicast. We copy instead of clone because we are modifying the data of
946 * the frame after copying ... clones share data so we cannot emulate
947 * broadcast using clones.
948 *
949 * run with eda->lock held (spinlock)
950 */
951int wlp_wss_send_copy(struct wlp *wlp, struct wlp_eda_node *eda_entry,
952 void *_skb)
953{
954 int result = -ENOMEM;
955 struct device *dev = &wlp->rc->uwb_dev.dev;
956 struct sk_buff *skb = _skb;
957 struct sk_buff *copy;
958 struct uwb_dev_addr *dev_addr = &eda_entry->dev_addr;
959
960 d_fnstart(5, dev, "to neighbor %02x:%02x, skb (%p) \n",
961 dev_addr->data[1], dev_addr->data[0], skb);
962 copy = skb_copy(skb, GFP_ATOMIC);
963 if (copy == NULL) {
964 if (printk_ratelimit())
965 dev_err(dev, "WLP: Unable to copy skb for "
966 "transmission.\n");
967 goto out;
968 }
969 result = wlp_wss_connect_prep(wlp, eda_entry, copy);
970 if (result < 0) {
971 if (printk_ratelimit())
972 dev_err(dev, "WLP: Unable to connect/send skb "
973 "to neighbor.\n");
974 dev_kfree_skb_irq(copy);
975 goto out;
976 } else if (result == 1)
977 /* Frame will be transmitted separately */
978 goto out;
979 BUG_ON(wlp->xmit_frame == NULL);
980 result = wlp->xmit_frame(wlp, copy, dev_addr);
981 if (result < 0) {
982 if (printk_ratelimit())
983 dev_err(dev, "WLP: Unable to transmit frame: %d\n",
984 result);
985 if ((result == -ENXIO) && printk_ratelimit())
986 dev_err(dev, "WLP: Is network interface up? \n");
987 /* We could try again ... */
988 dev_kfree_skb_irq(copy);/*we need to free if tx fails */
989 }
990out:
991 d_fnend(5, dev, "to neighbor %02x:%02x \n", dev_addr->data[1],
992 dev_addr->data[0]);
993 return result;
994}
995
996
997/**
998 * Setup WSS
999 *
1000 * Should be called by network driver after the interface has been given a
1001 * MAC address.
1002 */
1003int wlp_wss_setup(struct net_device *net_dev, struct wlp_wss *wss)
1004{
1005 struct wlp *wlp = container_of(wss, struct wlp, wss);
1006 struct device *dev = &wlp->rc->uwb_dev.dev;
1007 int result = 0;
1008 d_fnstart(5, dev, "wss (%p) \n", wss);
1009 mutex_lock(&wss->mutex);
1010 wss->kobj.parent = &net_dev->dev.kobj;
1011 if (!is_valid_ether_addr(net_dev->dev_addr)) {
1012 dev_err(dev, "WLP: Invalid MAC address. Cannot use for"
1013 "virtual.\n");
1014 result = -EINVAL;
1015 goto out;
1016 }
1017 memcpy(wss->virtual_addr.data, net_dev->dev_addr,
1018 sizeof(wss->virtual_addr.data));
1019out:
1020 mutex_unlock(&wss->mutex);
1021 d_fnend(5, dev, "wss (%p) \n", wss);
1022 return result;
1023}
1024EXPORT_SYMBOL_GPL(wlp_wss_setup);
1025
1026/**
1027 * Remove WSS
1028 *
1029 * Called by client that configured WSS through wlp_wss_setup(). This
1030 * function is called when client no longer needs WSS, eg. client shuts
1031 * down.
1032 *
1033 * We remove the WLP IE from the beacon before initiating local cleanup.
1034 */
1035void wlp_wss_remove(struct wlp_wss *wss)
1036{
1037 struct wlp *wlp = container_of(wss, struct wlp, wss);
1038 struct device *dev = &wlp->rc->uwb_dev.dev;
1039 d_fnstart(5, dev, "wss (%p) \n", wss);
1040 mutex_lock(&wss->mutex);
1041 if (wss->state == WLP_WSS_STATE_ACTIVE)
1042 uwb_rc_ie_rm(wlp->rc, UWB_IE_WLP);
1043 if (wss->state != WLP_WSS_STATE_NONE) {
1044 sysfs_remove_group(&wss->kobj, &wss_attr_group);
1045 kobject_put(&wss->kobj);
1046 }
1047 wss->kobj.parent = NULL;
1048 memset(&wss->virtual_addr, 0, sizeof(wss->virtual_addr));
1049 /* Cleanup EDA cache */
1050 wlp_eda_release(&wlp->eda);
1051 wlp_eda_init(&wlp->eda);
1052 mutex_unlock(&wss->mutex);
1053 d_fnend(5, dev, "wss (%p) \n", wss);
1054}
1055EXPORT_SYMBOL_GPL(wlp_wss_remove);
diff --git a/drivers/watchdog/w83697ug_wdt.c b/drivers/watchdog/w83697ug_wdt.c
index c73b5e2919c6..ada8ad82d993 100644
--- a/drivers/watchdog/w83697ug_wdt.c
+++ b/drivers/watchdog/w83697ug_wdt.c
@@ -102,7 +102,7 @@ static void w83697ug_select_wd_register(void)
102 102
103 } else { 103 } else {
104 printk(KERN_ERR PFX "No W83697UG/UF could be found\n"); 104 printk(KERN_ERR PFX "No W83697UG/UF could be found\n");
105 return -EIO; 105 return;
106 } 106 }
107 107
108 outb_p(0x07, WDT_EFER); /* point to logical device number reg */ 108 outb_p(0x07, WDT_EFER); /* point to logical device number reg */
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index 041c52692284..68bf2af6c389 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -178,7 +178,7 @@ v9fs_file_read(struct file *filp, char __user *udata, size_t count,
178 int ret; 178 int ret;
179 struct p9_fid *fid; 179 struct p9_fid *fid;
180 180
181 P9_DPRINTK(P9_DEBUG_VFS, "count %d offset %lld\n", count, *offset); 181 P9_DPRINTK(P9_DEBUG_VFS, "count %zu offset %lld\n", count, *offset);
182 fid = filp->private_data; 182 fid = filp->private_data;
183 183
184 if (count > (fid->clnt->msize - P9_IOHDRSZ)) 184 if (count > (fid->clnt->msize - P9_IOHDRSZ))
diff --git a/fs/ext3/ioctl.c b/fs/ext3/ioctl.c
index 0d0c70151642..b7394d05ee8e 100644
--- a/fs/ext3/ioctl.c
+++ b/fs/ext3/ioctl.c
@@ -239,7 +239,7 @@ setrsvsz_out:
239 case EXT3_IOC_GROUP_EXTEND: { 239 case EXT3_IOC_GROUP_EXTEND: {
240 ext3_fsblk_t n_blocks_count; 240 ext3_fsblk_t n_blocks_count;
241 struct super_block *sb = inode->i_sb; 241 struct super_block *sb = inode->i_sb;
242 int err; 242 int err, err2;
243 243
244 if (!capable(CAP_SYS_RESOURCE)) 244 if (!capable(CAP_SYS_RESOURCE))
245 return -EPERM; 245 return -EPERM;
@@ -254,8 +254,10 @@ setrsvsz_out:
254 } 254 }
255 err = ext3_group_extend(sb, EXT3_SB(sb)->s_es, n_blocks_count); 255 err = ext3_group_extend(sb, EXT3_SB(sb)->s_es, n_blocks_count);
256 journal_lock_updates(EXT3_SB(sb)->s_journal); 256 journal_lock_updates(EXT3_SB(sb)->s_journal);
257 journal_flush(EXT3_SB(sb)->s_journal); 257 err2 = journal_flush(EXT3_SB(sb)->s_journal);
258 journal_unlock_updates(EXT3_SB(sb)->s_journal); 258 journal_unlock_updates(EXT3_SB(sb)->s_journal);
259 if (err == 0)
260 err = err2;
259group_extend_out: 261group_extend_out:
260 mnt_drop_write(filp->f_path.mnt); 262 mnt_drop_write(filp->f_path.mnt);
261 return err; 263 return err;
@@ -263,7 +265,7 @@ group_extend_out:
263 case EXT3_IOC_GROUP_ADD: { 265 case EXT3_IOC_GROUP_ADD: {
264 struct ext3_new_group_data input; 266 struct ext3_new_group_data input;
265 struct super_block *sb = inode->i_sb; 267 struct super_block *sb = inode->i_sb;
266 int err; 268 int err, err2;
267 269
268 if (!capable(CAP_SYS_RESOURCE)) 270 if (!capable(CAP_SYS_RESOURCE))
269 return -EPERM; 271 return -EPERM;
@@ -280,8 +282,10 @@ group_extend_out:
280 282
281 err = ext3_group_add(sb, &input); 283 err = ext3_group_add(sb, &input);
282 journal_lock_updates(EXT3_SB(sb)->s_journal); 284 journal_lock_updates(EXT3_SB(sb)->s_journal);
283 journal_flush(EXT3_SB(sb)->s_journal); 285 err2 = journal_flush(EXT3_SB(sb)->s_journal);
284 journal_unlock_updates(EXT3_SB(sb)->s_journal); 286 journal_unlock_updates(EXT3_SB(sb)->s_journal);
287 if (err == 0)
288 err = err2;
285group_add_out: 289group_add_out:
286 mnt_drop_write(filp->f_path.mnt); 290 mnt_drop_write(filp->f_path.mnt);
287 return err; 291 return err;
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 3a260af5544d..cac29ee3b14a 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -393,7 +393,8 @@ static void ext3_put_super (struct super_block * sb)
393 int i; 393 int i;
394 394
395 ext3_xattr_put_super(sb); 395 ext3_xattr_put_super(sb);
396 journal_destroy(sbi->s_journal); 396 if (journal_destroy(sbi->s_journal) < 0)
397 ext3_abort(sb, __func__, "Couldn't clean up the journal");
397 if (!(sb->s_flags & MS_RDONLY)) { 398 if (!(sb->s_flags & MS_RDONLY)) {
398 EXT3_CLEAR_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER); 399 EXT3_CLEAR_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER);
399 es->s_state = cpu_to_le16(sbi->s_mount_state); 400 es->s_state = cpu_to_le16(sbi->s_mount_state);
@@ -2296,7 +2297,9 @@ static void ext3_mark_recovery_complete(struct super_block * sb,
2296 journal_t *journal = EXT3_SB(sb)->s_journal; 2297 journal_t *journal = EXT3_SB(sb)->s_journal;
2297 2298
2298 journal_lock_updates(journal); 2299 journal_lock_updates(journal);
2299 journal_flush(journal); 2300 if (journal_flush(journal) < 0)
2301 goto out;
2302
2300 lock_super(sb); 2303 lock_super(sb);
2301 if (EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER) && 2304 if (EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER) &&
2302 sb->s_flags & MS_RDONLY) { 2305 sb->s_flags & MS_RDONLY) {
@@ -2305,6 +2308,8 @@ static void ext3_mark_recovery_complete(struct super_block * sb,
2305 ext3_commit_super(sb, es, 1); 2308 ext3_commit_super(sb, es, 1);
2306 } 2309 }
2307 unlock_super(sb); 2310 unlock_super(sb);
2311
2312out:
2308 journal_unlock_updates(journal); 2313 journal_unlock_updates(journal);
2309} 2314}
2310 2315
@@ -2404,7 +2409,13 @@ static void ext3_write_super_lockfs(struct super_block *sb)
2404 2409
2405 /* Now we set up the journal barrier. */ 2410 /* Now we set up the journal barrier. */
2406 journal_lock_updates(journal); 2411 journal_lock_updates(journal);
2407 journal_flush(journal); 2412
2413 /*
2414 * We don't want to clear needs_recovery flag when we failed
2415 * to flush the journal.
2416 */
2417 if (journal_flush(journal) < 0)
2418 return;
2408 2419
2409 /* Journal blocked and flushed, clear needs_recovery flag. */ 2420 /* Journal blocked and flushed, clear needs_recovery flag. */
2410 EXT3_CLEAR_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER); 2421 EXT3_CLEAR_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER);
@@ -2822,8 +2833,12 @@ static int ext3_quota_on(struct super_block *sb, int type, int format_id,
2822 * otherwise be livelocked... 2833 * otherwise be livelocked...
2823 */ 2834 */
2824 journal_lock_updates(EXT3_SB(sb)->s_journal); 2835 journal_lock_updates(EXT3_SB(sb)->s_journal);
2825 journal_flush(EXT3_SB(sb)->s_journal); 2836 err = journal_flush(EXT3_SB(sb)->s_journal);
2826 journal_unlock_updates(EXT3_SB(sb)->s_journal); 2837 journal_unlock_updates(EXT3_SB(sb)->s_journal);
2838 if (err) {
2839 path_put(&nd.path);
2840 return err;
2841 }
2827 } 2842 }
2828 2843
2829 err = vfs_quota_on_path(sb, type, format_id, &nd.path); 2844 err = vfs_quota_on_path(sb, type, format_id, &nd.path);
diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
index a5432bbbfb88..1bd8d4acc6f2 100644
--- a/fs/jbd/checkpoint.c
+++ b/fs/jbd/checkpoint.c
@@ -93,7 +93,8 @@ static int __try_to_free_cp_buf(struct journal_head *jh)
93 int ret = 0; 93 int ret = 0;
94 struct buffer_head *bh = jh2bh(jh); 94 struct buffer_head *bh = jh2bh(jh);
95 95
96 if (jh->b_jlist == BJ_None && !buffer_locked(bh) && !buffer_dirty(bh)) { 96 if (jh->b_jlist == BJ_None && !buffer_locked(bh) &&
97 !buffer_dirty(bh) && !buffer_write_io_error(bh)) {
97 JBUFFER_TRACE(jh, "remove from checkpoint list"); 98 JBUFFER_TRACE(jh, "remove from checkpoint list");
98 ret = __journal_remove_checkpoint(jh) + 1; 99 ret = __journal_remove_checkpoint(jh) + 1;
99 jbd_unlock_bh_state(bh); 100 jbd_unlock_bh_state(bh);
@@ -126,14 +127,29 @@ void __log_wait_for_space(journal_t *journal)
126 127
127 /* 128 /*
128 * Test again, another process may have checkpointed while we 129 * Test again, another process may have checkpointed while we
129 * were waiting for the checkpoint lock 130 * were waiting for the checkpoint lock. If there are no
131 * outstanding transactions there is nothing to checkpoint and
132 * we can't make progress. Abort the journal in this case.
130 */ 133 */
131 spin_lock(&journal->j_state_lock); 134 spin_lock(&journal->j_state_lock);
135 spin_lock(&journal->j_list_lock);
132 nblocks = jbd_space_needed(journal); 136 nblocks = jbd_space_needed(journal);
133 if (__log_space_left(journal) < nblocks) { 137 if (__log_space_left(journal) < nblocks) {
138 int chkpt = journal->j_checkpoint_transactions != NULL;
139
140 spin_unlock(&journal->j_list_lock);
134 spin_unlock(&journal->j_state_lock); 141 spin_unlock(&journal->j_state_lock);
135 log_do_checkpoint(journal); 142 if (chkpt) {
143 log_do_checkpoint(journal);
144 } else {
145 printk(KERN_ERR "%s: no transactions\n",
146 __func__);
147 journal_abort(journal, 0);
148 }
149
136 spin_lock(&journal->j_state_lock); 150 spin_lock(&journal->j_state_lock);
151 } else {
152 spin_unlock(&journal->j_list_lock);
137 } 153 }
138 mutex_unlock(&journal->j_checkpoint_mutex); 154 mutex_unlock(&journal->j_checkpoint_mutex);
139 } 155 }
@@ -160,21 +176,25 @@ static void jbd_sync_bh(journal_t *journal, struct buffer_head *bh)
160 * buffers. Note that we take the buffers in the opposite ordering 176 * buffers. Note that we take the buffers in the opposite ordering
161 * from the one in which they were submitted for IO. 177 * from the one in which they were submitted for IO.
162 * 178 *
179 * Return 0 on success, and return <0 if some buffers have failed
180 * to be written out.
181 *
163 * Called with j_list_lock held. 182 * Called with j_list_lock held.
164 */ 183 */
165static void __wait_cp_io(journal_t *journal, transaction_t *transaction) 184static int __wait_cp_io(journal_t *journal, transaction_t *transaction)
166{ 185{
167 struct journal_head *jh; 186 struct journal_head *jh;
168 struct buffer_head *bh; 187 struct buffer_head *bh;
169 tid_t this_tid; 188 tid_t this_tid;
170 int released = 0; 189 int released = 0;
190 int ret = 0;
171 191
172 this_tid = transaction->t_tid; 192 this_tid = transaction->t_tid;
173restart: 193restart:
174 /* Did somebody clean up the transaction in the meanwhile? */ 194 /* Did somebody clean up the transaction in the meanwhile? */
175 if (journal->j_checkpoint_transactions != transaction || 195 if (journal->j_checkpoint_transactions != transaction ||
176 transaction->t_tid != this_tid) 196 transaction->t_tid != this_tid)
177 return; 197 return ret;
178 while (!released && transaction->t_checkpoint_io_list) { 198 while (!released && transaction->t_checkpoint_io_list) {
179 jh = transaction->t_checkpoint_io_list; 199 jh = transaction->t_checkpoint_io_list;
180 bh = jh2bh(jh); 200 bh = jh2bh(jh);
@@ -194,6 +214,9 @@ restart:
194 spin_lock(&journal->j_list_lock); 214 spin_lock(&journal->j_list_lock);
195 goto restart; 215 goto restart;
196 } 216 }
217 if (unlikely(buffer_write_io_error(bh)))
218 ret = -EIO;
219
197 /* 220 /*
198 * Now in whatever state the buffer currently is, we know that 221 * Now in whatever state the buffer currently is, we know that
199 * it has been written out and so we can drop it from the list 222 * it has been written out and so we can drop it from the list
@@ -203,6 +226,8 @@ restart:
203 journal_remove_journal_head(bh); 226 journal_remove_journal_head(bh);
204 __brelse(bh); 227 __brelse(bh);
205 } 228 }
229
230 return ret;
206} 231}
207 232
208#define NR_BATCH 64 233#define NR_BATCH 64
@@ -226,7 +251,8 @@ __flush_batch(journal_t *journal, struct buffer_head **bhs, int *batch_count)
226 * Try to flush one buffer from the checkpoint list to disk. 251 * Try to flush one buffer from the checkpoint list to disk.
227 * 252 *
228 * Return 1 if something happened which requires us to abort the current 253 * Return 1 if something happened which requires us to abort the current
229 * scan of the checkpoint list. 254 * scan of the checkpoint list. Return <0 if the buffer has failed to
255 * be written out.
230 * 256 *
231 * Called with j_list_lock held and drops it if 1 is returned 257 * Called with j_list_lock held and drops it if 1 is returned
232 * Called under jbd_lock_bh_state(jh2bh(jh)), and drops it 258 * Called under jbd_lock_bh_state(jh2bh(jh)), and drops it
@@ -256,6 +282,9 @@ static int __process_buffer(journal_t *journal, struct journal_head *jh,
256 log_wait_commit(journal, tid); 282 log_wait_commit(journal, tid);
257 ret = 1; 283 ret = 1;
258 } else if (!buffer_dirty(bh)) { 284 } else if (!buffer_dirty(bh)) {
285 ret = 1;
286 if (unlikely(buffer_write_io_error(bh)))
287 ret = -EIO;
259 J_ASSERT_JH(jh, !buffer_jbddirty(bh)); 288 J_ASSERT_JH(jh, !buffer_jbddirty(bh));
260 BUFFER_TRACE(bh, "remove from checkpoint"); 289 BUFFER_TRACE(bh, "remove from checkpoint");
261 __journal_remove_checkpoint(jh); 290 __journal_remove_checkpoint(jh);
@@ -263,7 +292,6 @@ static int __process_buffer(journal_t *journal, struct journal_head *jh,
263 jbd_unlock_bh_state(bh); 292 jbd_unlock_bh_state(bh);
264 journal_remove_journal_head(bh); 293 journal_remove_journal_head(bh);
265 __brelse(bh); 294 __brelse(bh);
266 ret = 1;
267 } else { 295 } else {
268 /* 296 /*
269 * Important: we are about to write the buffer, and 297 * Important: we are about to write the buffer, and
@@ -295,6 +323,7 @@ static int __process_buffer(journal_t *journal, struct journal_head *jh,
295 * to disk. We submit larger chunks of data at once. 323 * to disk. We submit larger chunks of data at once.
296 * 324 *
297 * The journal should be locked before calling this function. 325 * The journal should be locked before calling this function.
326 * Called with j_checkpoint_mutex held.
298 */ 327 */
299int log_do_checkpoint(journal_t *journal) 328int log_do_checkpoint(journal_t *journal)
300{ 329{
@@ -318,6 +347,7 @@ int log_do_checkpoint(journal_t *journal)
318 * OK, we need to start writing disk blocks. Take one transaction 347 * OK, we need to start writing disk blocks. Take one transaction
319 * and write it. 348 * and write it.
320 */ 349 */
350 result = 0;
321 spin_lock(&journal->j_list_lock); 351 spin_lock(&journal->j_list_lock);
322 if (!journal->j_checkpoint_transactions) 352 if (!journal->j_checkpoint_transactions)
323 goto out; 353 goto out;
@@ -334,7 +364,7 @@ restart:
334 int batch_count = 0; 364 int batch_count = 0;
335 struct buffer_head *bhs[NR_BATCH]; 365 struct buffer_head *bhs[NR_BATCH];
336 struct journal_head *jh; 366 struct journal_head *jh;
337 int retry = 0; 367 int retry = 0, err;
338 368
339 while (!retry && transaction->t_checkpoint_list) { 369 while (!retry && transaction->t_checkpoint_list) {
340 struct buffer_head *bh; 370 struct buffer_head *bh;
@@ -347,6 +377,8 @@ restart:
347 break; 377 break;
348 } 378 }
349 retry = __process_buffer(journal, jh, bhs,&batch_count); 379 retry = __process_buffer(journal, jh, bhs,&batch_count);
380 if (retry < 0 && !result)
381 result = retry;
350 if (!retry && (need_resched() || 382 if (!retry && (need_resched() ||
351 spin_needbreak(&journal->j_list_lock))) { 383 spin_needbreak(&journal->j_list_lock))) {
352 spin_unlock(&journal->j_list_lock); 384 spin_unlock(&journal->j_list_lock);
@@ -371,14 +403,18 @@ restart:
371 * Now we have cleaned up the first transaction's checkpoint 403 * Now we have cleaned up the first transaction's checkpoint
372 * list. Let's clean up the second one 404 * list. Let's clean up the second one
373 */ 405 */
374 __wait_cp_io(journal, transaction); 406 err = __wait_cp_io(journal, transaction);
407 if (!result)
408 result = err;
375 } 409 }
376out: 410out:
377 spin_unlock(&journal->j_list_lock); 411 spin_unlock(&journal->j_list_lock);
378 result = cleanup_journal_tail(journal);
379 if (result < 0) 412 if (result < 0)
380 return result; 413 journal_abort(journal, result);
381 return 0; 414 else
415 result = cleanup_journal_tail(journal);
416
417 return (result < 0) ? result : 0;
382} 418}
383 419
384/* 420/*
@@ -394,8 +430,9 @@ out:
394 * This is the only part of the journaling code which really needs to be 430 * This is the only part of the journaling code which really needs to be
395 * aware of transaction aborts. Checkpointing involves writing to the 431 * aware of transaction aborts. Checkpointing involves writing to the
396 * main filesystem area rather than to the journal, so it can proceed 432 * main filesystem area rather than to the journal, so it can proceed
397 * even in abort state, but we must not update the journal superblock if 433 * even in abort state, but we must not update the super block if
398 * we have an abort error outstanding. 434 * checkpointing may have failed. Otherwise, we would lose some metadata
435 * buffers which should be written-back to the filesystem.
399 */ 436 */
400 437
401int cleanup_journal_tail(journal_t *journal) 438int cleanup_journal_tail(journal_t *journal)
@@ -404,6 +441,9 @@ int cleanup_journal_tail(journal_t *journal)
404 tid_t first_tid; 441 tid_t first_tid;
405 unsigned long blocknr, freed; 442 unsigned long blocknr, freed;
406 443
444 if (is_journal_aborted(journal))
445 return 1;
446
407 /* OK, work out the oldest transaction remaining in the log, and 447 /* OK, work out the oldest transaction remaining in the log, and
408 * the log block it starts at. 448 * the log block it starts at.
409 * 449 *
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index aa7143a8349b..9e4fa52d7dc8 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -1121,9 +1121,12 @@ recovery_error:
1121 * 1121 *
1122 * Release a journal_t structure once it is no longer in use by the 1122 * Release a journal_t structure once it is no longer in use by the
1123 * journaled object. 1123 * journaled object.
1124 * Return <0 if we couldn't clean up the journal.
1124 */ 1125 */
1125void journal_destroy(journal_t *journal) 1126int journal_destroy(journal_t *journal)
1126{ 1127{
1128 int err = 0;
1129
1127 /* Wait for the commit thread to wake up and die. */ 1130 /* Wait for the commit thread to wake up and die. */
1128 journal_kill_thread(journal); 1131 journal_kill_thread(journal);
1129 1132
@@ -1146,11 +1149,16 @@ void journal_destroy(journal_t *journal)
1146 J_ASSERT(journal->j_checkpoint_transactions == NULL); 1149 J_ASSERT(journal->j_checkpoint_transactions == NULL);
1147 spin_unlock(&journal->j_list_lock); 1150 spin_unlock(&journal->j_list_lock);
1148 1151
1149 /* We can now mark the journal as empty. */
1150 journal->j_tail = 0;
1151 journal->j_tail_sequence = ++journal->j_transaction_sequence;
1152 if (journal->j_sb_buffer) { 1152 if (journal->j_sb_buffer) {
1153 journal_update_superblock(journal, 1); 1153 if (!is_journal_aborted(journal)) {
1154 /* We can now mark the journal as empty. */
1155 journal->j_tail = 0;
1156 journal->j_tail_sequence =
1157 ++journal->j_transaction_sequence;
1158 journal_update_superblock(journal, 1);
1159 } else {
1160 err = -EIO;
1161 }
1154 brelse(journal->j_sb_buffer); 1162 brelse(journal->j_sb_buffer);
1155 } 1163 }
1156 1164
@@ -1160,6 +1168,8 @@ void journal_destroy(journal_t *journal)
1160 journal_destroy_revoke(journal); 1168 journal_destroy_revoke(journal);
1161 kfree(journal->j_wbuf); 1169 kfree(journal->j_wbuf);
1162 kfree(journal); 1170 kfree(journal);
1171
1172 return err;
1163} 1173}
1164 1174
1165 1175
@@ -1359,10 +1369,16 @@ int journal_flush(journal_t *journal)
1359 spin_lock(&journal->j_list_lock); 1369 spin_lock(&journal->j_list_lock);
1360 while (!err && journal->j_checkpoint_transactions != NULL) { 1370 while (!err && journal->j_checkpoint_transactions != NULL) {
1361 spin_unlock(&journal->j_list_lock); 1371 spin_unlock(&journal->j_list_lock);
1372 mutex_lock(&journal->j_checkpoint_mutex);
1362 err = log_do_checkpoint(journal); 1373 err = log_do_checkpoint(journal);
1374 mutex_unlock(&journal->j_checkpoint_mutex);
1363 spin_lock(&journal->j_list_lock); 1375 spin_lock(&journal->j_list_lock);
1364 } 1376 }
1365 spin_unlock(&journal->j_list_lock); 1377 spin_unlock(&journal->j_list_lock);
1378
1379 if (is_journal_aborted(journal))
1380 return -EIO;
1381
1366 cleanup_journal_tail(journal); 1382 cleanup_journal_tail(journal);
1367 1383
1368 /* Finally, mark the journal as really needing no recovery. 1384 /* Finally, mark the journal as really needing no recovery.
@@ -1384,7 +1400,7 @@ int journal_flush(journal_t *journal)
1384 J_ASSERT(journal->j_head == journal->j_tail); 1400 J_ASSERT(journal->j_head == journal->j_tail);
1385 J_ASSERT(journal->j_tail_sequence == journal->j_transaction_sequence); 1401 J_ASSERT(journal->j_tail_sequence == journal->j_transaction_sequence);
1386 spin_unlock(&journal->j_state_lock); 1402 spin_unlock(&journal->j_state_lock);
1387 return err; 1403 return 0;
1388} 1404}
1389 1405
1390/** 1406/**
diff --git a/fs/jbd/recovery.c b/fs/jbd/recovery.c
index 43bc5e5ed064..db5e982c5ddf 100644
--- a/fs/jbd/recovery.c
+++ b/fs/jbd/recovery.c
@@ -223,7 +223,7 @@ do { \
223 */ 223 */
224int journal_recover(journal_t *journal) 224int journal_recover(journal_t *journal)
225{ 225{
226 int err; 226 int err, err2;
227 journal_superblock_t * sb; 227 journal_superblock_t * sb;
228 228
229 struct recovery_info info; 229 struct recovery_info info;
@@ -261,7 +261,10 @@ int journal_recover(journal_t *journal)
261 journal->j_transaction_sequence = ++info.end_transaction; 261 journal->j_transaction_sequence = ++info.end_transaction;
262 262
263 journal_clear_revoke(journal); 263 journal_clear_revoke(journal);
264 sync_blockdev(journal->j_fs_dev); 264 err2 = sync_blockdev(journal->j_fs_dev);
265 if (!err)
266 err = err2;
267
265 return err; 268 return err;
266} 269}
267 270
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index 9dc036f18356..5cd882b8871a 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -99,7 +99,7 @@ static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen)
99 int fsidtype; 99 int fsidtype;
100 char *ep; 100 char *ep;
101 struct svc_expkey key; 101 struct svc_expkey key;
102 struct svc_expkey *ek; 102 struct svc_expkey *ek = NULL;
103 103
104 if (mesg[mlen-1] != '\n') 104 if (mesg[mlen-1] != '\n')
105 return -EINVAL; 105 return -EINVAL;
@@ -107,7 +107,8 @@ static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen)
107 107
108 buf = kmalloc(PAGE_SIZE, GFP_KERNEL); 108 buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
109 err = -ENOMEM; 109 err = -ENOMEM;
110 if (!buf) goto out; 110 if (!buf)
111 goto out;
111 112
112 err = -EINVAL; 113 err = -EINVAL;
113 if ((len=qword_get(&mesg, buf, PAGE_SIZE)) <= 0) 114 if ((len=qword_get(&mesg, buf, PAGE_SIZE)) <= 0)
@@ -151,16 +152,16 @@ static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen)
151 152
152 /* now we want a pathname, or empty meaning NEGATIVE */ 153 /* now we want a pathname, or empty meaning NEGATIVE */
153 err = -EINVAL; 154 err = -EINVAL;
154 if ((len=qword_get(&mesg, buf, PAGE_SIZE)) < 0) 155 len = qword_get(&mesg, buf, PAGE_SIZE);
156 if (len < 0)
155 goto out; 157 goto out;
156 dprintk("Path seems to be <%s>\n", buf); 158 dprintk("Path seems to be <%s>\n", buf);
157 err = 0; 159 err = 0;
158 if (len == 0) { 160 if (len == 0) {
159 set_bit(CACHE_NEGATIVE, &key.h.flags); 161 set_bit(CACHE_NEGATIVE, &key.h.flags);
160 ek = svc_expkey_update(&key, ek); 162 ek = svc_expkey_update(&key, ek);
161 if (ek) 163 if (!ek)
162 cache_put(&ek->h, &svc_expkey_cache); 164 err = -ENOMEM;
163 else err = -ENOMEM;
164 } else { 165 } else {
165 struct nameidata nd; 166 struct nameidata nd;
166 err = path_lookup(buf, 0, &nd); 167 err = path_lookup(buf, 0, &nd);
@@ -171,14 +172,14 @@ static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen)
171 key.ek_path = nd.path; 172 key.ek_path = nd.path;
172 173
173 ek = svc_expkey_update(&key, ek); 174 ek = svc_expkey_update(&key, ek);
174 if (ek) 175 if (!ek)
175 cache_put(&ek->h, &svc_expkey_cache);
176 else
177 err = -ENOMEM; 176 err = -ENOMEM;
178 path_put(&nd.path); 177 path_put(&nd.path);
179 } 178 }
180 cache_flush(); 179 cache_flush();
181 out: 180 out:
181 if (ek)
182 cache_put(&ek->h, &svc_expkey_cache);
182 if (dom) 183 if (dom)
183 auth_domain_put(dom); 184 auth_domain_put(dom);
184 kfree(buf); 185 kfree(buf);
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index 59eeb46f82c5..07e4f5d7baa8 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -249,6 +249,10 @@ static int nfsd_init_socks(int port)
249 if (error < 0) 249 if (error < 0)
250 return error; 250 return error;
251 251
252 error = lockd_up();
253 if (error < 0)
254 return error;
255
252 error = svc_create_xprt(nfsd_serv, "tcp", port, 256 error = svc_create_xprt(nfsd_serv, "tcp", port,
253 SVC_SOCK_DEFAULTS); 257 SVC_SOCK_DEFAULTS);
254 if (error < 0) 258 if (error < 0)
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index aa1d0d6489a1..9609eb51d727 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -410,6 +410,7 @@ out_nfserr:
410static ssize_t nfsd_getxattr(struct dentry *dentry, char *key, void **buf) 410static ssize_t nfsd_getxattr(struct dentry *dentry, char *key, void **buf)
411{ 411{
412 ssize_t buflen; 412 ssize_t buflen;
413 ssize_t ret;
413 414
414 buflen = vfs_getxattr(dentry, key, NULL, 0); 415 buflen = vfs_getxattr(dentry, key, NULL, 0);
415 if (buflen <= 0) 416 if (buflen <= 0)
@@ -419,7 +420,10 @@ static ssize_t nfsd_getxattr(struct dentry *dentry, char *key, void **buf)
419 if (!*buf) 420 if (!*buf)
420 return -ENOMEM; 421 return -ENOMEM;
421 422
422 return vfs_getxattr(dentry, key, *buf, buflen); 423 ret = vfs_getxattr(dentry, key, *buf, buflen);
424 if (ret < 0)
425 kfree(*buf);
426 return ret;
423} 427}
424#endif 428#endif
425 429
diff --git a/include/asm-generic/mutex-dec.h b/include/asm-generic/mutex-dec.h
index ed108be6743f..f104af7cf437 100644
--- a/include/asm-generic/mutex-dec.h
+++ b/include/asm-generic/mutex-dec.h
@@ -22,8 +22,6 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
22{ 22{
23 if (unlikely(atomic_dec_return(count) < 0)) 23 if (unlikely(atomic_dec_return(count) < 0))
24 fail_fn(count); 24 fail_fn(count);
25 else
26 smp_mb();
27} 25}
28 26
29/** 27/**
@@ -41,10 +39,7 @@ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
41{ 39{
42 if (unlikely(atomic_dec_return(count) < 0)) 40 if (unlikely(atomic_dec_return(count) < 0))
43 return fail_fn(count); 41 return fail_fn(count);
44 else { 42 return 0;
45 smp_mb();
46 return 0;
47 }
48} 43}
49 44
50/** 45/**
@@ -63,7 +58,6 @@ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
63static inline void 58static inline void
64__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *)) 59__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
65{ 60{
66 smp_mb();
67 if (unlikely(atomic_inc_return(count) <= 0)) 61 if (unlikely(atomic_inc_return(count) <= 0))
68 fail_fn(count); 62 fail_fn(count);
69} 63}
@@ -88,25 +82,9 @@ __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
88static inline int 82static inline int
89__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *)) 83__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
90{ 84{
91 /* 85 if (likely(atomic_cmpxchg(count, 1, 0) == 1))
92 * We have two variants here. The cmpxchg based one is the best one
93 * because it never induce a false contention state. It is included
94 * here because architectures using the inc/dec algorithms over the
95 * xchg ones are much more likely to support cmpxchg natively.
96 *
97 * If not we fall back to the spinlock based variant - that is
98 * just as efficient (and simpler) as a 'destructive' probing of
99 * the mutex state would be.
100 */
101#ifdef __HAVE_ARCH_CMPXCHG
102 if (likely(atomic_cmpxchg(count, 1, 0) == 1)) {
103 smp_mb();
104 return 1; 86 return 1;
105 }
106 return 0; 87 return 0;
107#else
108 return fail_fn(count);
109#endif
110} 88}
111 89
112#endif 90#endif
diff --git a/include/asm-generic/mutex-xchg.h b/include/asm-generic/mutex-xchg.h
index 7b9cd2cbfebe..580a6d35c700 100644
--- a/include/asm-generic/mutex-xchg.h
+++ b/include/asm-generic/mutex-xchg.h
@@ -27,8 +27,6 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
27{ 27{
28 if (unlikely(atomic_xchg(count, 0) != 1)) 28 if (unlikely(atomic_xchg(count, 0) != 1))
29 fail_fn(count); 29 fail_fn(count);
30 else
31 smp_mb();
32} 30}
33 31
34/** 32/**
@@ -46,10 +44,7 @@ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
46{ 44{
47 if (unlikely(atomic_xchg(count, 0) != 1)) 45 if (unlikely(atomic_xchg(count, 0) != 1))
48 return fail_fn(count); 46 return fail_fn(count);
49 else { 47 return 0;
50 smp_mb();
51 return 0;
52 }
53} 48}
54 49
55/** 50/**
@@ -67,7 +62,6 @@ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
67static inline void 62static inline void
68__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *)) 63__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
69{ 64{
70 smp_mb();
71 if (unlikely(atomic_xchg(count, 1) != 0)) 65 if (unlikely(atomic_xchg(count, 1) != 0))
72 fail_fn(count); 66 fail_fn(count);
73} 67}
@@ -110,7 +104,6 @@ __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
110 if (prev < 0) 104 if (prev < 0)
111 prev = 0; 105 prev = 0;
112 } 106 }
113 smp_mb();
114 107
115 return prev; 108 return prev;
116} 109}
diff --git a/include/asm-x86/iommu.h b/include/asm-x86/iommu.h
index 961e746da977..2daaffcda52f 100644
--- a/include/asm-x86/iommu.h
+++ b/include/asm-x86/iommu.h
@@ -7,9 +7,13 @@ extern struct dma_mapping_ops nommu_dma_ops;
7extern int force_iommu, no_iommu; 7extern int force_iommu, no_iommu;
8extern int iommu_detected; 8extern int iommu_detected;
9extern int dmar_disabled; 9extern int dmar_disabled;
10extern int forbid_dac;
10 11
11extern unsigned long iommu_nr_pages(unsigned long addr, unsigned long len); 12extern unsigned long iommu_nr_pages(unsigned long addr, unsigned long len);
12 13
14/* 10 seconds */
15#define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000)
16
13#ifdef CONFIG_GART_IOMMU 17#ifdef CONFIG_GART_IOMMU
14extern int gart_iommu_aperture; 18extern int gart_iommu_aperture;
15extern int gart_iommu_aperture_allowed; 19extern int gart_iommu_aperture_allowed;
diff --git a/include/asm-xtensa/io.h b/include/asm-xtensa/io.h
index 47c3616ea9ac..07b7299dab20 100644
--- a/include/asm-xtensa/io.h
+++ b/include/asm-xtensa/io.h
@@ -18,10 +18,12 @@
18 18
19#include <linux/types.h> 19#include <linux/types.h>
20 20
21#define XCHAL_KIO_CACHED_VADDR 0xf0000000 21#define XCHAL_KIO_CACHED_VADDR 0xe0000000
22#define XCHAL_KIO_BYPASS_VADDR 0xf8000000 22#define XCHAL_KIO_BYPASS_VADDR 0xf0000000
23#define XCHAL_KIO_PADDR 0xf0000000 23#define XCHAL_KIO_PADDR 0xf0000000
24#define XCHAL_KIO_SIZE 0x08000000 24#define XCHAL_KIO_SIZE 0x10000000
25
26#define IOADDR(x) (XCHAL_KIO_BYPASS_VADDR + (x))
25 27
26/* 28/*
27 * swap functions to change byte order from little-endian to big-endian and 29 * swap functions to change byte order from little-endian to big-endian and
diff --git a/include/asm-xtensa/rwsem.h b/include/asm-xtensa/rwsem.h
index 0aad3a587551..e39edf5c86f2 100644
--- a/include/asm-xtensa/rwsem.h
+++ b/include/asm-xtensa/rwsem.h
@@ -13,6 +13,10 @@
13#ifndef _XTENSA_RWSEM_H 13#ifndef _XTENSA_RWSEM_H
14#define _XTENSA_RWSEM_H 14#define _XTENSA_RWSEM_H
15 15
16#ifndef _LINUX_RWSEM_H
17#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
18#endif
19
16#include <linux/list.h> 20#include <linux/list.h>
17#include <linux/spinlock.h> 21#include <linux/spinlock.h>
18#include <asm/atomic.h> 22#include <asm/atomic.h>
diff --git a/include/asm-xtensa/variant-dc232b/core.h b/include/asm-xtensa/variant-dc232b/core.h
new file mode 100644
index 000000000000..525bd3d90154
--- /dev/null
+++ b/include/asm-xtensa/variant-dc232b/core.h
@@ -0,0 +1,424 @@
1/*
2 * Xtensa processor core configuration information.
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (c) 1999-2007 Tensilica Inc.
9 */
10
11#ifndef _XTENSA_CORE_CONFIGURATION_H
12#define _XTENSA_CORE_CONFIGURATION_H
13
14
15/****************************************************************************
16 Parameters Useful for Any Code, USER or PRIVILEGED
17 ****************************************************************************/
18
19/*
20 * Note: Macros of the form XCHAL_HAVE_*** have a value of 1 if the option is
21 * configured, and a value of 0 otherwise. These macros are always defined.
22 */
23
24
25/*----------------------------------------------------------------------
26 ISA
27 ----------------------------------------------------------------------*/
28
29#define XCHAL_HAVE_BE 0 /* big-endian byte ordering */
30#define XCHAL_HAVE_WINDOWED 1 /* windowed registers option */
31#define XCHAL_NUM_AREGS 32 /* num of physical addr regs */
32#define XCHAL_NUM_AREGS_LOG2 5 /* log2(XCHAL_NUM_AREGS) */
33#define XCHAL_MAX_INSTRUCTION_SIZE 3 /* max instr bytes (3..8) */
34#define XCHAL_HAVE_DEBUG 1 /* debug option */
35#define XCHAL_HAVE_DENSITY 1 /* 16-bit instructions */
36#define XCHAL_HAVE_LOOPS 1 /* zero-overhead loops */
37#define XCHAL_HAVE_NSA 1 /* NSA/NSAU instructions */
38#define XCHAL_HAVE_MINMAX 1 /* MIN/MAX instructions */
39#define XCHAL_HAVE_SEXT 1 /* SEXT instruction */
40#define XCHAL_HAVE_CLAMPS 1 /* CLAMPS instruction */
41#define XCHAL_HAVE_MUL16 1 /* MUL16S/MUL16U instructions */
42#define XCHAL_HAVE_MUL32 1 /* MULL instruction */
43#define XCHAL_HAVE_MUL32_HIGH 0 /* MULUH/MULSH instructions */
44#define XCHAL_HAVE_DIV32 1 /* QUOS/QUOU/REMS/REMU instructions */
45#define XCHAL_HAVE_L32R 1 /* L32R instruction */
46#define XCHAL_HAVE_ABSOLUTE_LITERALS 1 /* non-PC-rel (extended) L32R */
47#define XCHAL_HAVE_CONST16 0 /* CONST16 instruction */
48#define XCHAL_HAVE_ADDX 1 /* ADDX#/SUBX# instructions */
49#define XCHAL_HAVE_WIDE_BRANCHES 0 /* B*.W18 or B*.W15 instr's */
50#define XCHAL_HAVE_PREDICTED_BRANCHES 0 /* B[EQ/EQZ/NE/NEZ]T instr's */
51#define XCHAL_HAVE_CALL4AND12 1 /* (obsolete option) */
52#define XCHAL_HAVE_ABS 1 /* ABS instruction */
53/*#define XCHAL_HAVE_POPC 0*/ /* POPC instruction */
54/*#define XCHAL_HAVE_CRC 0*/ /* CRC instruction */
55#define XCHAL_HAVE_RELEASE_SYNC 1 /* L32AI/S32RI instructions */
56#define XCHAL_HAVE_S32C1I 1 /* S32C1I instruction */
57#define XCHAL_HAVE_SPECULATION 0 /* speculation */
58#define XCHAL_HAVE_FULL_RESET 1 /* all regs/state reset */
59#define XCHAL_NUM_CONTEXTS 1 /* */
60#define XCHAL_NUM_MISC_REGS 2 /* num of scratch regs (0..4) */
61#define XCHAL_HAVE_TAP_MASTER 0 /* JTAG TAP control instr's */
62#define XCHAL_HAVE_PRID 1 /* processor ID register */
63#define XCHAL_HAVE_THREADPTR 1 /* THREADPTR register */
64#define XCHAL_HAVE_BOOLEANS 0 /* boolean registers */
65#define XCHAL_HAVE_CP 1 /* CPENABLE reg (coprocessor) */
66#define XCHAL_CP_MAXCFG 8 /* max allowed cp id plus one */
67#define XCHAL_HAVE_MAC16 1 /* MAC16 package */
68#define XCHAL_HAVE_VECTORFPU2005 0 /* vector floating-point pkg */
69#define XCHAL_HAVE_FP 0 /* floating point pkg */
70#define XCHAL_HAVE_VECTRA1 0 /* Vectra I pkg */
71#define XCHAL_HAVE_VECTRALX 0 /* Vectra LX pkg */
72#define XCHAL_HAVE_HIFI2 0 /* HiFi2 Audio Engine pkg */
73
74
75/*----------------------------------------------------------------------
76 MISC
77 ----------------------------------------------------------------------*/
78
79#define XCHAL_NUM_WRITEBUFFER_ENTRIES 8 /* size of write buffer */
80#define XCHAL_INST_FETCH_WIDTH 4 /* instr-fetch width in bytes */
81#define XCHAL_DATA_WIDTH 4 /* data width in bytes */
82/* In T1050, applies to selected core load and store instructions (see ISA): */
83#define XCHAL_UNALIGNED_LOAD_EXCEPTION 1 /* unaligned loads cause exc. */
84#define XCHAL_UNALIGNED_STORE_EXCEPTION 1 /* unaligned stores cause exc.*/
85
86#define XCHAL_SW_VERSION 701001 /* sw version of this header */
87
88#define XCHAL_CORE_ID "dc232b" /* alphanum core name
89 (CoreID) set in the Xtensa
90 Processor Generator */
91
92#define XCHAL_CORE_DESCRIPTION "Diamond 232L Standard Core Rev.B (LE)"
93#define XCHAL_BUILD_UNIQUE_ID 0x0000BEEF /* 22-bit sw build ID */
94
95/*
96 * These definitions describe the hardware targeted by this software.
97 */
98#define XCHAL_HW_CONFIGID0 0xC56307FE /* ConfigID hi 32 bits*/
99#define XCHAL_HW_CONFIGID1 0x0D40BEEF /* ConfigID lo 32 bits*/
100#define XCHAL_HW_VERSION_NAME "LX2.1.1" /* full version name */
101#define XCHAL_HW_VERSION_MAJOR 2210 /* major ver# of targeted hw */
102#define XCHAL_HW_VERSION_MINOR 1 /* minor ver# of targeted hw */
103#define XCHAL_HW_VERSION 221001 /* major*100+minor */
104#define XCHAL_HW_REL_LX2 1
105#define XCHAL_HW_REL_LX2_1 1
106#define XCHAL_HW_REL_LX2_1_1 1
107#define XCHAL_HW_CONFIGID_RELIABLE 1
108/* If software targets a *range* of hardware versions, these are the bounds: */
109#define XCHAL_HW_MIN_VERSION_MAJOR 2210 /* major v of earliest tgt hw */
110#define XCHAL_HW_MIN_VERSION_MINOR 1 /* minor v of earliest tgt hw */
111#define XCHAL_HW_MIN_VERSION 221001 /* earliest targeted hw */
112#define XCHAL_HW_MAX_VERSION_MAJOR 2210 /* major v of latest tgt hw */
113#define XCHAL_HW_MAX_VERSION_MINOR 1 /* minor v of latest tgt hw */
114#define XCHAL_HW_MAX_VERSION 221001 /* latest targeted hw */
115
116
117/*----------------------------------------------------------------------
118 CACHE
119 ----------------------------------------------------------------------*/
120
121#define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
122#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
123#define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
124#define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
125
126#define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
127#define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
128
129#define XCHAL_DCACHE_IS_WRITEBACK 1 /* writeback feature */
130
131
132
133
134/****************************************************************************
135 Parameters Useful for PRIVILEGED (Supervisory or Non-Virtualized) Code
136 ****************************************************************************/
137
138
139#ifndef XTENSA_HAL_NON_PRIVILEGED_ONLY
140
141/*----------------------------------------------------------------------
142 CACHE
143 ----------------------------------------------------------------------*/
144
145#define XCHAL_HAVE_PIF 1 /* any outbound PIF present */
146
147/* If present, cache size in bytes == (ways * 2^(linewidth + setwidth)). */
148
149/* Number of cache sets in log2(lines per way): */
150#define XCHAL_ICACHE_SETWIDTH 7
151#define XCHAL_DCACHE_SETWIDTH 7
152
153/* Cache set associativity (number of ways): */
154#define XCHAL_ICACHE_WAYS 4
155#define XCHAL_DCACHE_WAYS 4
156
157/* Cache features: */
158#define XCHAL_ICACHE_LINE_LOCKABLE 1
159#define XCHAL_DCACHE_LINE_LOCKABLE 1
160#define XCHAL_ICACHE_ECC_PARITY 0
161#define XCHAL_DCACHE_ECC_PARITY 0
162
163/* Number of encoded cache attr bits (see <xtensa/hal.h> for decoded bits): */
164#define XCHAL_CA_BITS 4
165
166
167/*----------------------------------------------------------------------
168 INTERNAL I/D RAM/ROMs and XLMI
169 ----------------------------------------------------------------------*/
170
171#define XCHAL_NUM_INSTROM 0 /* number of core instr. ROMs */
172#define XCHAL_NUM_INSTRAM 0 /* number of core instr. RAMs */
173#define XCHAL_NUM_DATAROM 0 /* number of core data ROMs */
174#define XCHAL_NUM_DATARAM 0 /* number of core data RAMs */
175#define XCHAL_NUM_URAM 0 /* number of core unified RAMs*/
176#define XCHAL_NUM_XLMI 0 /* number of core XLMI ports */
177
178
179/*----------------------------------------------------------------------
180 INTERRUPTS and TIMERS
181 ----------------------------------------------------------------------*/
182
183#define XCHAL_HAVE_INTERRUPTS 1 /* interrupt option */
184#define XCHAL_HAVE_HIGHPRI_INTERRUPTS 1 /* med/high-pri. interrupts */
185#define XCHAL_HAVE_NMI 1 /* non-maskable interrupt */
186#define XCHAL_HAVE_CCOUNT 1 /* CCOUNT reg. (timer option) */
187#define XCHAL_NUM_TIMERS 3 /* number of CCOMPAREn regs */
188#define XCHAL_NUM_INTERRUPTS 22 /* number of interrupts */
189#define XCHAL_NUM_INTERRUPTS_LOG2 5 /* ceil(log2(NUM_INTERRUPTS)) */
190#define XCHAL_NUM_EXTINTERRUPTS 17 /* num of external interrupts */
191#define XCHAL_NUM_INTLEVELS 6 /* number of interrupt levels
192 (not including level zero) */
193#define XCHAL_EXCM_LEVEL 3 /* level masked by PS.EXCM */
194 /* (always 1 in XEA1; levels 2 .. EXCM_LEVEL are "medium priority") */
195
196/* Masks of interrupts at each interrupt level: */
197#define XCHAL_INTLEVEL1_MASK 0x001F80FF
198#define XCHAL_INTLEVEL2_MASK 0x00000100
199#define XCHAL_INTLEVEL3_MASK 0x00200E00
200#define XCHAL_INTLEVEL4_MASK 0x00001000
201#define XCHAL_INTLEVEL5_MASK 0x00002000
202#define XCHAL_INTLEVEL6_MASK 0x00000000
203#define XCHAL_INTLEVEL7_MASK 0x00004000
204
205/* Masks of interrupts at each range 1..n of interrupt levels: */
206#define XCHAL_INTLEVEL1_ANDBELOW_MASK 0x001F80FF
207#define XCHAL_INTLEVEL2_ANDBELOW_MASK 0x001F81FF
208#define XCHAL_INTLEVEL3_ANDBELOW_MASK 0x003F8FFF
209#define XCHAL_INTLEVEL4_ANDBELOW_MASK 0x003F9FFF
210#define XCHAL_INTLEVEL5_ANDBELOW_MASK 0x003FBFFF
211#define XCHAL_INTLEVEL6_ANDBELOW_MASK 0x003FBFFF
212#define XCHAL_INTLEVEL7_ANDBELOW_MASK 0x003FFFFF
213
214/* Level of each interrupt: */
215#define XCHAL_INT0_LEVEL 1
216#define XCHAL_INT1_LEVEL 1
217#define XCHAL_INT2_LEVEL 1
218#define XCHAL_INT3_LEVEL 1
219#define XCHAL_INT4_LEVEL 1
220#define XCHAL_INT5_LEVEL 1
221#define XCHAL_INT6_LEVEL 1
222#define XCHAL_INT7_LEVEL 1
223#define XCHAL_INT8_LEVEL 2
224#define XCHAL_INT9_LEVEL 3
225#define XCHAL_INT10_LEVEL 3
226#define XCHAL_INT11_LEVEL 3
227#define XCHAL_INT12_LEVEL 4
228#define XCHAL_INT13_LEVEL 5
229#define XCHAL_INT14_LEVEL 7
230#define XCHAL_INT15_LEVEL 1
231#define XCHAL_INT16_LEVEL 1
232#define XCHAL_INT17_LEVEL 1
233#define XCHAL_INT18_LEVEL 1
234#define XCHAL_INT19_LEVEL 1
235#define XCHAL_INT20_LEVEL 1
236#define XCHAL_INT21_LEVEL 3
237#define XCHAL_DEBUGLEVEL 6 /* debug interrupt level */
238#define XCHAL_HAVE_DEBUG_EXTERN_INT 1 /* OCD external db interrupt */
239#define XCHAL_NMILEVEL 7 /* NMI "level" (for use with
240 EXCSAVE/EPS/EPC_n, RFI n) */
241
242/* Type of each interrupt: */
243#define XCHAL_INT0_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
244#define XCHAL_INT1_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
245#define XCHAL_INT2_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
246#define XCHAL_INT3_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
247#define XCHAL_INT4_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
248#define XCHAL_INT5_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
249#define XCHAL_INT6_TYPE XTHAL_INTTYPE_TIMER
250#define XCHAL_INT7_TYPE XTHAL_INTTYPE_SOFTWARE
251#define XCHAL_INT8_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
252#define XCHAL_INT9_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
253#define XCHAL_INT10_TYPE XTHAL_INTTYPE_TIMER
254#define XCHAL_INT11_TYPE XTHAL_INTTYPE_SOFTWARE
255#define XCHAL_INT12_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
256#define XCHAL_INT13_TYPE XTHAL_INTTYPE_TIMER
257#define XCHAL_INT14_TYPE XTHAL_INTTYPE_NMI
258#define XCHAL_INT15_TYPE XTHAL_INTTYPE_EXTERN_EDGE
259#define XCHAL_INT16_TYPE XTHAL_INTTYPE_EXTERN_EDGE
260#define XCHAL_INT17_TYPE XTHAL_INTTYPE_EXTERN_EDGE
261#define XCHAL_INT18_TYPE XTHAL_INTTYPE_EXTERN_EDGE
262#define XCHAL_INT19_TYPE XTHAL_INTTYPE_EXTERN_EDGE
263#define XCHAL_INT20_TYPE XTHAL_INTTYPE_EXTERN_EDGE
264#define XCHAL_INT21_TYPE XTHAL_INTTYPE_EXTERN_EDGE
265
266/* Masks of interrupts for each type of interrupt: */
267#define XCHAL_INTTYPE_MASK_UNCONFIGURED 0xFFC00000
268#define XCHAL_INTTYPE_MASK_SOFTWARE 0x00000880
269#define XCHAL_INTTYPE_MASK_EXTERN_EDGE 0x003F8000
270#define XCHAL_INTTYPE_MASK_EXTERN_LEVEL 0x0000133F
271#define XCHAL_INTTYPE_MASK_TIMER 0x00002440
272#define XCHAL_INTTYPE_MASK_NMI 0x00004000
273#define XCHAL_INTTYPE_MASK_WRITE_ERROR 0x00000000
274
275/* Interrupt numbers assigned to specific interrupt sources: */
276#define XCHAL_TIMER0_INTERRUPT 6 /* CCOMPARE0 */
277#define XCHAL_TIMER1_INTERRUPT 10 /* CCOMPARE1 */
278#define XCHAL_TIMER2_INTERRUPT 13 /* CCOMPARE2 */
279#define XCHAL_TIMER3_INTERRUPT XTHAL_TIMER_UNCONFIGURED
280#define XCHAL_NMI_INTERRUPT 14 /* non-maskable interrupt */
281
282/* Interrupt numbers for levels at which only one interrupt is configured: */
283#define XCHAL_INTLEVEL2_NUM 8
284#define XCHAL_INTLEVEL4_NUM 12
285#define XCHAL_INTLEVEL5_NUM 13
286#define XCHAL_INTLEVEL7_NUM 14
287/* (There are many interrupts each at level(s) 1, 3.) */
288
289
290/*
291 * External interrupt vectors/levels.
292 * These macros describe how Xtensa processor interrupt numbers
293 * (as numbered internally, eg. in INTERRUPT and INTENABLE registers)
294 * map to external BInterrupt<n> pins, for those interrupts
295 * configured as external (level-triggered, edge-triggered, or NMI).
296 * See the Xtensa processor databook for more details.
297 */
298
299/* Core interrupt numbers mapped to each EXTERNAL interrupt number: */
300#define XCHAL_EXTINT0_NUM 0 /* (intlevel 1) */
301#define XCHAL_EXTINT1_NUM 1 /* (intlevel 1) */
302#define XCHAL_EXTINT2_NUM 2 /* (intlevel 1) */
303#define XCHAL_EXTINT3_NUM 3 /* (intlevel 1) */
304#define XCHAL_EXTINT4_NUM 4 /* (intlevel 1) */
305#define XCHAL_EXTINT5_NUM 5 /* (intlevel 1) */
306#define XCHAL_EXTINT6_NUM 8 /* (intlevel 2) */
307#define XCHAL_EXTINT7_NUM 9 /* (intlevel 3) */
308#define XCHAL_EXTINT8_NUM 12 /* (intlevel 4) */
309#define XCHAL_EXTINT9_NUM 14 /* (intlevel 7) */
310#define XCHAL_EXTINT10_NUM 15 /* (intlevel 1) */
311#define XCHAL_EXTINT11_NUM 16 /* (intlevel 1) */
312#define XCHAL_EXTINT12_NUM 17 /* (intlevel 1) */
313#define XCHAL_EXTINT13_NUM 18 /* (intlevel 1) */
314#define XCHAL_EXTINT14_NUM 19 /* (intlevel 1) */
315#define XCHAL_EXTINT15_NUM 20 /* (intlevel 1) */
316#define XCHAL_EXTINT16_NUM 21 /* (intlevel 3) */
317
318
319/*----------------------------------------------------------------------
320 EXCEPTIONS and VECTORS
321 ----------------------------------------------------------------------*/
322
323#define XCHAL_XEA_VERSION 2 /* Xtensa Exception Architecture
324 number: 1 == XEA1 (old)
325 2 == XEA2 (new)
326 0 == XEAX (extern) */
327#define XCHAL_HAVE_XEA1 0 /* Exception Architecture 1 */
328#define XCHAL_HAVE_XEA2 1 /* Exception Architecture 2 */
329#define XCHAL_HAVE_XEAX 0 /* External Exception Arch. */
330#define XCHAL_HAVE_EXCEPTIONS 1 /* exception option */
331#define XCHAL_HAVE_MEM_ECC_PARITY 0 /* local memory ECC/parity */
332#define XCHAL_HAVE_VECTOR_SELECT 1 /* relocatable vectors */
333#define XCHAL_HAVE_VECBASE 1 /* relocatable vectors */
334#define XCHAL_VECBASE_RESET_VADDR 0xD0000000 /* VECBASE reset value */
335#define XCHAL_VECBASE_RESET_PADDR 0x00000000
336#define XCHAL_RESET_VECBASE_OVERLAP 0
337
338#define XCHAL_RESET_VECTOR0_VADDR 0xFE000000
339#define XCHAL_RESET_VECTOR0_PADDR 0xFE000000
340#define XCHAL_RESET_VECTOR1_VADDR 0xD8000500
341#define XCHAL_RESET_VECTOR1_PADDR 0x00000500
342#define XCHAL_RESET_VECTOR_VADDR 0xFE000000
343#define XCHAL_RESET_VECTOR_PADDR 0xFE000000
344#define XCHAL_USER_VECOFS 0x00000340
345#define XCHAL_USER_VECTOR_VADDR 0xD0000340
346#define XCHAL_USER_VECTOR_PADDR 0x00000340
347#define XCHAL_KERNEL_VECOFS 0x00000300
348#define XCHAL_KERNEL_VECTOR_VADDR 0xD0000300
349#define XCHAL_KERNEL_VECTOR_PADDR 0x00000300
350#define XCHAL_DOUBLEEXC_VECOFS 0x000003C0
351#define XCHAL_DOUBLEEXC_VECTOR_VADDR 0xD00003C0
352#define XCHAL_DOUBLEEXC_VECTOR_PADDR 0x000003C0
353#define XCHAL_WINDOW_OF4_VECOFS 0x00000000
354#define XCHAL_WINDOW_UF4_VECOFS 0x00000040
355#define XCHAL_WINDOW_OF8_VECOFS 0x00000080
356#define XCHAL_WINDOW_UF8_VECOFS 0x000000C0
357#define XCHAL_WINDOW_OF12_VECOFS 0x00000100
358#define XCHAL_WINDOW_UF12_VECOFS 0x00000140
359#define XCHAL_WINDOW_VECTORS_VADDR 0xD0000000
360#define XCHAL_WINDOW_VECTORS_PADDR 0x00000000
361#define XCHAL_INTLEVEL2_VECOFS 0x00000180
362#define XCHAL_INTLEVEL2_VECTOR_VADDR 0xD0000180
363#define XCHAL_INTLEVEL2_VECTOR_PADDR 0x00000180
364#define XCHAL_INTLEVEL3_VECOFS 0x000001C0
365#define XCHAL_INTLEVEL3_VECTOR_VADDR 0xD00001C0
366#define XCHAL_INTLEVEL3_VECTOR_PADDR 0x000001C0
367#define XCHAL_INTLEVEL4_VECOFS 0x00000200
368#define XCHAL_INTLEVEL4_VECTOR_VADDR 0xD0000200
369#define XCHAL_INTLEVEL4_VECTOR_PADDR 0x00000200
370#define XCHAL_INTLEVEL5_VECOFS 0x00000240
371#define XCHAL_INTLEVEL5_VECTOR_VADDR 0xD0000240
372#define XCHAL_INTLEVEL5_VECTOR_PADDR 0x00000240
373#define XCHAL_INTLEVEL6_VECOFS 0x00000280
374#define XCHAL_INTLEVEL6_VECTOR_VADDR 0xD0000280
375#define XCHAL_INTLEVEL6_VECTOR_PADDR 0x00000280
376#define XCHAL_DEBUG_VECOFS XCHAL_INTLEVEL6_VECOFS
377#define XCHAL_DEBUG_VECTOR_VADDR XCHAL_INTLEVEL6_VECTOR_VADDR
378#define XCHAL_DEBUG_VECTOR_PADDR XCHAL_INTLEVEL6_VECTOR_PADDR
379#define XCHAL_NMI_VECOFS 0x000002C0
380#define XCHAL_NMI_VECTOR_VADDR 0xD00002C0
381#define XCHAL_NMI_VECTOR_PADDR 0x000002C0
382#define XCHAL_INTLEVEL7_VECOFS XCHAL_NMI_VECOFS
383#define XCHAL_INTLEVEL7_VECTOR_VADDR XCHAL_NMI_VECTOR_VADDR
384#define XCHAL_INTLEVEL7_VECTOR_PADDR XCHAL_NMI_VECTOR_PADDR
385
386
387/*----------------------------------------------------------------------
388 DEBUG
389 ----------------------------------------------------------------------*/
390
391#define XCHAL_HAVE_OCD 1 /* OnChipDebug option */
392#define XCHAL_NUM_IBREAK 2 /* number of IBREAKn regs */
393#define XCHAL_NUM_DBREAK 2 /* number of DBREAKn regs */
394#define XCHAL_HAVE_OCD_DIR_ARRAY 1 /* faster OCD option */
395
396
397/*----------------------------------------------------------------------
398 MMU
399 ----------------------------------------------------------------------*/
400
401/* See core-matmap.h header file for more details. */
402
403#define XCHAL_HAVE_TLBS 1 /* inverse of HAVE_CACHEATTR */
404#define XCHAL_HAVE_SPANNING_WAY 0 /* one way maps I+D 4GB vaddr */
405#define XCHAL_HAVE_IDENTITY_MAP 0 /* vaddr == paddr always */
406#define XCHAL_HAVE_CACHEATTR 0 /* CACHEATTR register present */
407#define XCHAL_HAVE_MIMIC_CACHEATTR 0 /* region protection */
408#define XCHAL_HAVE_XLT_CACHEATTR 0 /* region prot. w/translation */
409#define XCHAL_HAVE_PTP_MMU 1 /* full MMU (with page table
410 [autorefill] and protection)
411 usable for an MMU-based OS */
412/* If none of the above last 4 are set, it's a custom TLB configuration. */
413#define XCHAL_ITLB_ARF_ENTRIES_LOG2 2 /* log2(autorefill way size) */
414#define XCHAL_DTLB_ARF_ENTRIES_LOG2 2 /* log2(autorefill way size) */
415
416#define XCHAL_MMU_ASID_BITS 8 /* number of bits in ASIDs */
417#define XCHAL_MMU_RINGS 4 /* number of rings (1..4) */
418#define XCHAL_MMU_RING_BITS 2 /* num of bits in RING field */
419
420#endif /* !XTENSA_HAL_NON_PRIVILEGED_ONLY */
421
422
423#endif /* _XTENSA_CORE_CONFIGURATION_H */
424
diff --git a/include/asm-xtensa/variant-dc232b/tie-asm.h b/include/asm-xtensa/variant-dc232b/tie-asm.h
new file mode 100644
index 000000000000..ed4f53f529db
--- /dev/null
+++ b/include/asm-xtensa/variant-dc232b/tie-asm.h
@@ -0,0 +1,122 @@
1/*
2 * This header file contains assembly-language definitions (assembly
3 * macros, etc.) for this specific Xtensa processor's TIE extensions
4 * and options. It is customized to this Xtensa processor configuration.
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 *
10 * Copyright (C) 1999-2007 Tensilica Inc.
11 */
12
13#ifndef _XTENSA_CORE_TIE_ASM_H
14#define _XTENSA_CORE_TIE_ASM_H
15
16/* Selection parameter values for save-area save/restore macros: */
17/* Option vs. TIE: */
18#define XTHAL_SAS_TIE 0x0001 /* custom extension or coprocessor */
19#define XTHAL_SAS_OPT 0x0002 /* optional (and not a coprocessor) */
20/* Whether used automatically by compiler: */
21#define XTHAL_SAS_NOCC 0x0004 /* not used by compiler w/o special opts/code */
22#define XTHAL_SAS_CC 0x0008 /* used by compiler without special opts/code */
23/* ABI handling across function calls: */
24#define XTHAL_SAS_CALR 0x0010 /* caller-saved */
25#define XTHAL_SAS_CALE 0x0020 /* callee-saved */
26#define XTHAL_SAS_GLOB 0x0040 /* global across function calls (in thread) */
27/* Misc */
28#define XTHAL_SAS_ALL 0xFFFF /* include all default NCP contents */
29
30
31
32/* Macro to save all non-coprocessor (extra) custom TIE and optional state
33 * (not including zero-overhead loop registers).
34 * Save area ptr (clobbered): ptr (1 byte aligned)
35 * Scratch regs (clobbered): at1..at4 (only first XCHAL_NCP_NUM_ATMPS needed)
36 */
37 .macro xchal_ncp_store ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL
38 xchal_sa_start \continue, \ofs
39 .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~\select
40 xchal_sa_align \ptr, 0, 1024-8, 4, 4
41 rsr \at1, ACCLO // MAC16 accumulator
42 rsr \at2, ACCHI
43 s32i \at1, \ptr, .Lxchal_ofs_ + 0
44 s32i \at2, \ptr, .Lxchal_ofs_ + 4
45 .set .Lxchal_ofs_, .Lxchal_ofs_ + 8
46 .endif
47 .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~\select
48 xchal_sa_align \ptr, 0, 1024-16, 4, 4
49 rsr \at1, M0 // MAC16 registers
50 rsr \at2, M1
51 s32i \at1, \ptr, .Lxchal_ofs_ + 0
52 s32i \at2, \ptr, .Lxchal_ofs_ + 4
53 rsr \at1, M2
54 rsr \at2, M3
55 s32i \at1, \ptr, .Lxchal_ofs_ + 8
56 s32i \at2, \ptr, .Lxchal_ofs_ + 12
57 .set .Lxchal_ofs_, .Lxchal_ofs_ + 16
58 .endif
59 .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~\select
60 xchal_sa_align \ptr, 0, 1024-4, 4, 4
61 rsr \at1, SCOMPARE1 // conditional store option
62 s32i \at1, \ptr, .Lxchal_ofs_ + 0
63 .set .Lxchal_ofs_, .Lxchal_ofs_ + 4
64 .endif
65 .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~\select
66 xchal_sa_align \ptr, 0, 1024-4, 4, 4
67 rur \at1, THREADPTR // threadptr option
68 s32i \at1, \ptr, .Lxchal_ofs_ + 0
69 .set .Lxchal_ofs_, .Lxchal_ofs_ + 4
70 .endif
71 .endm // xchal_ncp_store
72
73/* Macro to save all non-coprocessor (extra) custom TIE and optional state
74 * (not including zero-overhead loop registers).
75 * Save area ptr (clobbered): ptr (1 byte aligned)
76 * Scratch regs (clobbered): at1..at4 (only first XCHAL_NCP_NUM_ATMPS needed)
77 */
78 .macro xchal_ncp_load ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL
79 xchal_sa_start \continue, \ofs
80 .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~\select
81 xchal_sa_align \ptr, 0, 1024-8, 4, 4
82 l32i \at1, \ptr, .Lxchal_ofs_ + 0
83 l32i \at2, \ptr, .Lxchal_ofs_ + 4
84 wsr \at1, ACCLO // MAC16 accumulator
85 wsr \at2, ACCHI
86 .set .Lxchal_ofs_, .Lxchal_ofs_ + 8
87 .endif
88 .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~\select
89 xchal_sa_align \ptr, 0, 1024-16, 4, 4
90 l32i \at1, \ptr, .Lxchal_ofs_ + 0
91 l32i \at2, \ptr, .Lxchal_ofs_ + 4
92 wsr \at1, M0 // MAC16 registers
93 wsr \at2, M1
94 l32i \at1, \ptr, .Lxchal_ofs_ + 8
95 l32i \at2, \ptr, .Lxchal_ofs_ + 12
96 wsr \at1, M2
97 wsr \at2, M3
98 .set .Lxchal_ofs_, .Lxchal_ofs_ + 16
99 .endif
100 .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~\select
101 xchal_sa_align \ptr, 0, 1024-4, 4, 4
102 l32i \at1, \ptr, .Lxchal_ofs_ + 0
103 wsr \at1, SCOMPARE1 // conditional store option
104 .set .Lxchal_ofs_, .Lxchal_ofs_ + 4
105 .endif
106 .ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~\select
107 xchal_sa_align \ptr, 0, 1024-4, 4, 4
108 l32i \at1, \ptr, .Lxchal_ofs_ + 0
109 wur \at1, THREADPTR // threadptr option
110 .set .Lxchal_ofs_, .Lxchal_ofs_ + 4
111 .endif
112 .endm // xchal_ncp_load
113
114
115
116#define XCHAL_NCP_NUM_ATMPS 2
117
118
119#define XCHAL_SA_NUM_ATMPS 2
120
121#endif /*_XTENSA_CORE_TIE_ASM_H*/
122
diff --git a/include/asm-xtensa/variant-dc232b/tie.h b/include/asm-xtensa/variant-dc232b/tie.h
new file mode 100644
index 000000000000..018e81af4393
--- /dev/null
+++ b/include/asm-xtensa/variant-dc232b/tie.h
@@ -0,0 +1,131 @@
1/*
2 * This header file describes this specific Xtensa processor's TIE extensions
3 * that extend basic Xtensa core functionality. It is customized to this
4 * Xtensa processor configuration.
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 *
10 * Copyright (C) 1999-2007 Tensilica Inc.
11 */
12
13#ifndef _XTENSA_CORE_TIE_H
14#define _XTENSA_CORE_TIE_H
15
16#define XCHAL_CP_NUM 1 /* number of coprocessors */
17#define XCHAL_CP_MAX 8 /* max CP ID + 1 (0 if none) */
18#define XCHAL_CP_MASK 0x80 /* bitmask of all CPs by ID */
19#define XCHAL_CP_PORT_MASK 0x80 /* bitmask of only port CPs */
20
21/* Basic parameters of each coprocessor: */
22#define XCHAL_CP7_NAME "XTIOP"
23#define XCHAL_CP7_IDENT XTIOP
24#define XCHAL_CP7_SA_SIZE 0 /* size of state save area */
25#define XCHAL_CP7_SA_ALIGN 1 /* min alignment of save area */
26#define XCHAL_CP_ID_XTIOP 7 /* coprocessor ID (0..7) */
27
28/* Filler info for unassigned coprocessors, to simplify arrays etc: */
29#define XCHAL_CP0_SA_SIZE 0
30#define XCHAL_CP0_SA_ALIGN 1
31#define XCHAL_CP1_SA_SIZE 0
32#define XCHAL_CP1_SA_ALIGN 1
33#define XCHAL_CP2_SA_SIZE 0
34#define XCHAL_CP2_SA_ALIGN 1
35#define XCHAL_CP3_SA_SIZE 0
36#define XCHAL_CP3_SA_ALIGN 1
37#define XCHAL_CP4_SA_SIZE 0
38#define XCHAL_CP4_SA_ALIGN 1
39#define XCHAL_CP5_SA_SIZE 0
40#define XCHAL_CP5_SA_ALIGN 1
41#define XCHAL_CP6_SA_SIZE 0
42#define XCHAL_CP6_SA_ALIGN 1
43
44/* Save area for non-coprocessor optional and custom (TIE) state: */
45#define XCHAL_NCP_SA_SIZE 32
46#define XCHAL_NCP_SA_ALIGN 4
47
48/* Total save area for optional and custom state (NCP + CPn): */
49#define XCHAL_TOTAL_SA_SIZE 32 /* with 16-byte align padding */
50#define XCHAL_TOTAL_SA_ALIGN 4 /* actual minimum alignment */
51
52/*
53 * Detailed contents of save areas.
54 * NOTE: caller must define the XCHAL_SA_REG macro (not defined here)
55 * before expanding the XCHAL_xxx_SA_LIST() macros.
56 *
57 * XCHAL_SA_REG(s,ccused,abikind,kind,opt,name,galign,align,asize,
58 * dbnum,base,regnum,bitsz,gapsz,reset,x...)
59 *
60 * s = passed from XCHAL_*_LIST(s), eg. to select how to expand
61 * ccused = set if used by compiler without special options or code
62 * abikind = 0 (caller-saved), 1 (callee-saved), or 2 (thread-global)
63 * kind = 0 (special reg), 1 (TIE user reg), or 2 (TIE regfile reg)
64 * opt = 0 (custom TIE extension or coprocessor), or 1 (optional reg)
65 * name = lowercase reg name (no quotes)
66 * galign = group byte alignment (power of 2) (galign >= align)
67 * align = register byte alignment (power of 2)
68 * asize = allocated size in bytes (asize*8 == bitsz + gapsz + padsz)
69 * (not including any pad bytes required to galign this or next reg)
70 * dbnum = unique target number f/debug (see <xtensa-libdb-macros.h>)
71 * base = reg shortname w/o index (or sr=special, ur=TIE user reg)
72 * regnum = reg index in regfile, or special/TIE-user reg number
73 * bitsz = number of significant bits (regfile width, or ur/sr mask bits)
74 * gapsz = intervening bits, if bitsz bits not stored contiguously
75 * (padsz = pad bits at end [TIE regfile] or at msbits [ur,sr] of asize)
76 * reset = register reset value (or 0 if undefined at reset)
77 * x = reserved for future use (0 until then)
78 *
79 * To filter out certain registers, e.g. to expand only the non-global
80 * registers used by the compiler, you can do something like this:
81 *
82 * #define XCHAL_SA_REG(s,ccused,p...) SELCC##ccused(p)
83 * #define SELCC0(p...)
84 * #define SELCC1(abikind,p...) SELAK##abikind(p)
85 * #define SELAK0(p...) REG(p)
86 * #define SELAK1(p...) REG(p)
87 * #define SELAK2(p...)
88 * #define REG(kind,tie,name,galn,aln,asz,csz,dbnum,base,rnum,bsz,rst,x...) \
89 * ...what you want to expand...
90 */
91
92#define XCHAL_NCP_SA_NUM 8
93#define XCHAL_NCP_SA_LIST(s) \
94 XCHAL_SA_REG(s,1,0,0,1, acclo, 4, 4, 4,0x0210, sr,16 , 32,0,0,0) \
95 XCHAL_SA_REG(s,1,0,0,1, acchi, 4, 4, 4,0x0211, sr,17 , 8,0,0,0) \
96 XCHAL_SA_REG(s,0,0,0,1, m0, 4, 4, 4,0x0220, sr,32 , 32,0,0,0) \
97 XCHAL_SA_REG(s,0,0,0,1, m1, 4, 4, 4,0x0221, sr,33 , 32,0,0,0) \
98 XCHAL_SA_REG(s,0,0,0,1, m2, 4, 4, 4,0x0222, sr,34 , 32,0,0,0) \
99 XCHAL_SA_REG(s,0,0,0,1, m3, 4, 4, 4,0x0223, sr,35 , 32,0,0,0) \
100 XCHAL_SA_REG(s,0,0,0,1, scompare1, 4, 4, 4,0x020C, sr,12 , 32,0,0,0) \
101 XCHAL_SA_REG(s,1,2,1,1, threadptr, 4, 4, 4,0x03E7, ur,231, 32,0,0,0)
102
103#define XCHAL_CP0_SA_NUM 0
104#define XCHAL_CP0_SA_LIST(s) /* empty */
105
106#define XCHAL_CP1_SA_NUM 0
107#define XCHAL_CP1_SA_LIST(s) /* empty */
108
109#define XCHAL_CP2_SA_NUM 0
110#define XCHAL_CP2_SA_LIST(s) /* empty */
111
112#define XCHAL_CP3_SA_NUM 0
113#define XCHAL_CP3_SA_LIST(s) /* empty */
114
115#define XCHAL_CP4_SA_NUM 0
116#define XCHAL_CP4_SA_LIST(s) /* empty */
117
118#define XCHAL_CP5_SA_NUM 0
119#define XCHAL_CP5_SA_LIST(s) /* empty */
120
121#define XCHAL_CP6_SA_NUM 0
122#define XCHAL_CP6_SA_LIST(s) /* empty */
123
124#define XCHAL_CP7_SA_NUM 0
125#define XCHAL_CP7_SA_LIST(s) /* empty */
126
127/* Byte length of instruction from its first nibble (op0 field), per FLIX. */
128#define XCHAL_OP0_FORMAT_LENGTHS 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3
129
130#endif /*_XTENSA_CORE_TIE_H*/
131
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index 1abfe664c444..a08c33a26ca9 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -129,6 +129,7 @@ extern void bitmap_fold(unsigned long *dst, const unsigned long *orig,
129extern int bitmap_find_free_region(unsigned long *bitmap, int bits, int order); 129extern int bitmap_find_free_region(unsigned long *bitmap, int bits, int order);
130extern void bitmap_release_region(unsigned long *bitmap, int pos, int order); 130extern void bitmap_release_region(unsigned long *bitmap, int pos, int order);
131extern int bitmap_allocate_region(unsigned long *bitmap, int pos, int order); 131extern int bitmap_allocate_region(unsigned long *bitmap, int pos, int order);
132extern void bitmap_copy_le(void *dst, const unsigned long *src, int nbits);
132 133
133#define BITMAP_LAST_WORD_MASK(nbits) \ 134#define BITMAP_LAST_WORD_MASK(nbits) \
134( \ 135( \
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 08d783592b73..dfb30db475ed 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -354,6 +354,9 @@ void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);
354 */ 354 */
355#define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz)) 355#define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz))
356 356
357#define dm_array_too_big(fixed, obj, num) \
358 ((num) > (UINT_MAX - (fixed)) / (obj))
359
357static inline sector_t to_sector(unsigned long n) 360static inline sector_t to_sector(unsigned long n)
358{ 361{
359 return (n >> SECTOR_SHIFT); 362 return (n >> SECTOR_SHIFT);
diff --git a/include/linux/device.h b/include/linux/device.h
index 987f5912720a..1a3686d15f98 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -450,7 +450,7 @@ static inline void set_dev_node(struct device *dev, int node)
450} 450}
451#endif 451#endif
452 452
453static inline void *dev_get_drvdata(struct device *dev) 453static inline void *dev_get_drvdata(const struct device *dev)
454{ 454{
455 return dev->driver_data; 455 return dev->driver_data;
456} 456}
diff --git a/include/linux/dm-region-hash.h b/include/linux/dm-region-hash.h
new file mode 100644
index 000000000000..a9e652a41373
--- /dev/null
+++ b/include/linux/dm-region-hash.h
@@ -0,0 +1,104 @@
1/*
2 * Copyright (C) 2003 Sistina Software Limited.
3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
4 *
5 * Device-Mapper dirty region hash interface.
6 *
7 * This file is released under the GPL.
8 */
9
10#ifndef DM_REGION_HASH_H
11#define DM_REGION_HASH_H
12
13#include <linux/dm-dirty-log.h>
14
15/*-----------------------------------------------------------------
16 * Region hash
17 *----------------------------------------------------------------*/
18struct dm_region_hash;
19struct dm_region;
20
21/*
22 * States a region can have.
23 */
24enum dm_rh_region_states {
25 DM_RH_CLEAN = 0x01, /* No writes in flight. */
26 DM_RH_DIRTY = 0x02, /* Writes in flight. */
27 DM_RH_NOSYNC = 0x04, /* Out of sync. */
28 DM_RH_RECOVERING = 0x08, /* Under resynchronization. */
29};
30
31/*
32 * Region hash create/destroy.
33 */
34struct bio_list;
35struct dm_region_hash *dm_region_hash_create(
36 void *context, void (*dispatch_bios)(void *context,
37 struct bio_list *bios),
38 void (*wakeup_workers)(void *context),
39 void (*wakeup_all_recovery_waiters)(void *context),
40 sector_t target_begin, unsigned max_recovery,
41 struct dm_dirty_log *log, uint32_t region_size,
42 region_t nr_regions);
43void dm_region_hash_destroy(struct dm_region_hash *rh);
44
45struct dm_dirty_log *dm_rh_dirty_log(struct dm_region_hash *rh);
46
47/*
48 * Conversion functions.
49 */
50region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio);
51sector_t dm_rh_region_to_sector(struct dm_region_hash *rh, region_t region);
52void *dm_rh_region_context(struct dm_region *reg);
53
54/*
55 * Get region size and key (ie. number of the region).
56 */
57sector_t dm_rh_get_region_size(struct dm_region_hash *rh);
58region_t dm_rh_get_region_key(struct dm_region *reg);
59
60/*
61 * Get/set/update region state (and dirty log).
62 *
63 */
64int dm_rh_get_state(struct dm_region_hash *rh, region_t region, int may_block);
65void dm_rh_set_state(struct dm_region_hash *rh, region_t region,
66 enum dm_rh_region_states state, int may_block);
67
68/* Non-zero errors_handled leaves the state of the region NOSYNC */
69void dm_rh_update_states(struct dm_region_hash *rh, int errors_handled);
70
71/* Flush the region hash and dirty log. */
72int dm_rh_flush(struct dm_region_hash *rh);
73
74/* Inc/dec pending count on regions. */
75void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios);
76void dm_rh_dec(struct dm_region_hash *rh, region_t region);
77
78/* Delay bios on regions. */
79void dm_rh_delay(struct dm_region_hash *rh, struct bio *bio);
80
81void dm_rh_mark_nosync(struct dm_region_hash *rh,
82 struct bio *bio, unsigned done, int error);
83
84/*
85 * Region recovery control.
86 */
87
88/* Prepare some regions for recovery by starting to quiesce them. */
89void dm_rh_recovery_prepare(struct dm_region_hash *rh);
90
91/* Try fetching a quiesced region for recovery. */
92struct dm_region *dm_rh_recovery_start(struct dm_region_hash *rh);
93
94/* Report recovery end on a region. */
95void dm_rh_recovery_end(struct dm_region *reg, int error);
96
97/* Returns number of regions with recovery work outstanding. */
98int dm_rh_recovery_in_flight(struct dm_region_hash *rh);
99
100/* Start/stop recovery. */
101void dm_rh_start_recovery(struct dm_region_hash *rh);
102void dm_rh_stop_recovery(struct dm_region_hash *rh);
103
104#endif /* DM_REGION_HASH_H */
diff --git a/include/linux/dma_remapping.h b/include/linux/dma_remapping.h
index bff5c65f81dc..952df39c989d 100644
--- a/include/linux/dma_remapping.h
+++ b/include/linux/dma_remapping.h
@@ -2,15 +2,14 @@
2#define _DMA_REMAPPING_H 2#define _DMA_REMAPPING_H
3 3
4/* 4/*
5 * We need a fixed PAGE_SIZE of 4K irrespective of 5 * VT-d hardware uses 4KiB page size regardless of host page size.
6 * arch PAGE_SIZE for IOMMU page tables.
7 */ 6 */
8#define PAGE_SHIFT_4K (12) 7#define VTD_PAGE_SHIFT (12)
9#define PAGE_SIZE_4K (1UL << PAGE_SHIFT_4K) 8#define VTD_PAGE_SIZE (1UL << VTD_PAGE_SHIFT)
10#define PAGE_MASK_4K (((u64)-1) << PAGE_SHIFT_4K) 9#define VTD_PAGE_MASK (((u64)-1) << VTD_PAGE_SHIFT)
11#define PAGE_ALIGN_4K(addr) (((addr) + PAGE_SIZE_4K - 1) & PAGE_MASK_4K) 10#define VTD_PAGE_ALIGN(addr) (((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK)
12 11
13#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT_4K) 12#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
14#define DMA_32BIT_PFN IOVA_PFN(DMA_32BIT_MASK) 13#define DMA_32BIT_PFN IOVA_PFN(DMA_32BIT_MASK)
15#define DMA_64BIT_PFN IOVA_PFN(DMA_64BIT_MASK) 14#define DMA_64BIT_PFN IOVA_PFN(DMA_64BIT_MASK)
16 15
@@ -25,7 +24,7 @@ struct root_entry {
25 u64 val; 24 u64 val;
26 u64 rsvd1; 25 u64 rsvd1;
27}; 26};
28#define ROOT_ENTRY_NR (PAGE_SIZE_4K/sizeof(struct root_entry)) 27#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
29static inline bool root_present(struct root_entry *root) 28static inline bool root_present(struct root_entry *root)
30{ 29{
31 return (root->val & 1); 30 return (root->val & 1);
@@ -36,7 +35,7 @@ static inline void set_root_present(struct root_entry *root)
36} 35}
37static inline void set_root_value(struct root_entry *root, unsigned long value) 36static inline void set_root_value(struct root_entry *root, unsigned long value)
38{ 37{
39 root->val |= value & PAGE_MASK_4K; 38 root->val |= value & VTD_PAGE_MASK;
40} 39}
41 40
42struct context_entry; 41struct context_entry;
@@ -45,7 +44,7 @@ get_context_addr_from_root(struct root_entry *root)
45{ 44{
46 return (struct context_entry *) 45 return (struct context_entry *)
47 (root_present(root)?phys_to_virt( 46 (root_present(root)?phys_to_virt(
48 root->val & PAGE_MASK_4K): 47 root->val & VTD_PAGE_MASK) :
49 NULL); 48 NULL);
50} 49}
51 50
@@ -67,7 +66,7 @@ struct context_entry {
67#define context_present(c) ((c).lo & 1) 66#define context_present(c) ((c).lo & 1)
68#define context_fault_disable(c) (((c).lo >> 1) & 1) 67#define context_fault_disable(c) (((c).lo >> 1) & 1)
69#define context_translation_type(c) (((c).lo >> 2) & 3) 68#define context_translation_type(c) (((c).lo >> 2) & 3)
70#define context_address_root(c) ((c).lo & PAGE_MASK_4K) 69#define context_address_root(c) ((c).lo & VTD_PAGE_MASK)
71#define context_address_width(c) ((c).hi & 7) 70#define context_address_width(c) ((c).hi & 7)
72#define context_domain_id(c) (((c).hi >> 8) & ((1 << 16) - 1)) 71#define context_domain_id(c) (((c).hi >> 8) & ((1 << 16) - 1))
73 72
@@ -81,7 +80,7 @@ struct context_entry {
81 } while (0) 80 } while (0)
82#define CONTEXT_TT_MULTI_LEVEL 0 81#define CONTEXT_TT_MULTI_LEVEL 0
83#define context_set_address_root(c, val) \ 82#define context_set_address_root(c, val) \
84 do {(c).lo |= (val) & PAGE_MASK_4K;} while (0) 83 do {(c).lo |= (val) & VTD_PAGE_MASK; } while (0)
85#define context_set_address_width(c, val) do {(c).hi |= (val) & 7;} while (0) 84#define context_set_address_width(c, val) do {(c).hi |= (val) & 7;} while (0)
86#define context_set_domain_id(c, val) \ 85#define context_set_domain_id(c, val) \
87 do {(c).hi |= ((val) & ((1 << 16) - 1)) << 8;} while (0) 86 do {(c).hi |= ((val) & ((1 << 16) - 1)) << 8;} while (0)
@@ -107,9 +106,9 @@ struct dma_pte {
107#define dma_set_pte_writable(p) do {(p).val |= DMA_PTE_WRITE;} while (0) 106#define dma_set_pte_writable(p) do {(p).val |= DMA_PTE_WRITE;} while (0)
108#define dma_set_pte_prot(p, prot) \ 107#define dma_set_pte_prot(p, prot) \
109 do {(p).val = ((p).val & ~3) | ((prot) & 3); } while (0) 108 do {(p).val = ((p).val & ~3) | ((prot) & 3); } while (0)
110#define dma_pte_addr(p) ((p).val & PAGE_MASK_4K) 109#define dma_pte_addr(p) ((p).val & VTD_PAGE_MASK)
111#define dma_set_pte_addr(p, addr) do {\ 110#define dma_set_pte_addr(p, addr) do {\
112 (p).val |= ((addr) & PAGE_MASK_4K); } while (0) 111 (p).val |= ((addr) & VTD_PAGE_MASK); } while (0)
113#define dma_pte_present(p) (((p).val & 3) != 0) 112#define dma_pte_present(p) (((p).val & 3) != 0)
114 113
115struct intel_iommu; 114struct intel_iommu;
diff --git a/include/linux/i2c-algo-pcf.h b/include/linux/i2c-algo-pcf.h
index 0177d280f733..0f91a957a690 100644
--- a/include/linux/i2c-algo-pcf.h
+++ b/include/linux/i2c-algo-pcf.h
@@ -31,7 +31,10 @@ struct i2c_algo_pcf_data {
31 int (*getpcf) (void *data, int ctl); 31 int (*getpcf) (void *data, int ctl);
32 int (*getown) (void *data); 32 int (*getown) (void *data);
33 int (*getclock) (void *data); 33 int (*getclock) (void *data);
34 void (*waitforpin) (void); 34 void (*waitforpin) (void *data);
35
36 void (*xfer_begin) (void *data);
37 void (*xfer_end) (void *data);
35 38
36 /* Multi-master lost arbitration back-off delay (msecs) 39 /* Multi-master lost arbitration back-off delay (msecs)
37 * This should be set by the bus adapter or knowledgable client 40 * This should be set by the bus adapter or knowledgable client
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 06115128047f..33a5992d4936 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -53,45 +53,44 @@ struct i2c_board_info;
53 * transmit one message at a time, a more complex version can be used to 53 * transmit one message at a time, a more complex version can be used to
54 * transmit an arbitrary number of messages without interruption. 54 * transmit an arbitrary number of messages without interruption.
55 */ 55 */
56extern int i2c_master_send(struct i2c_client *,const char* ,int); 56extern int i2c_master_send(struct i2c_client *client, const char *buf,
57extern int i2c_master_recv(struct i2c_client *,char* ,int); 57 int count);
58extern int i2c_master_recv(struct i2c_client *client, char *buf, int count);
58 59
59/* Transfer num messages. 60/* Transfer num messages.
60 */ 61 */
61extern int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num); 62extern int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
62 63 int num);
63 64
64/* This is the very generalized SMBus access routine. You probably do not 65/* This is the very generalized SMBus access routine. You probably do not
65 want to use this, though; one of the functions below may be much easier, 66 want to use this, though; one of the functions below may be much easier,
66 and probably just as fast. 67 and probably just as fast.
67 Note that we use i2c_adapter here, because you do not need a specific 68 Note that we use i2c_adapter here, because you do not need a specific
68 smbus adapter to call this function. */ 69 smbus adapter to call this function. */
69extern s32 i2c_smbus_xfer (struct i2c_adapter * adapter, u16 addr, 70extern s32 i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
70 unsigned short flags, 71 unsigned short flags, char read_write, u8 command,
71 char read_write, u8 command, int size, 72 int size, union i2c_smbus_data *data);
72 union i2c_smbus_data * data);
73 73
74/* Now follow the 'nice' access routines. These also document the calling 74/* Now follow the 'nice' access routines. These also document the calling
75 conventions of i2c_smbus_xfer. */ 75 conventions of i2c_smbus_xfer. */
76 76
77extern s32 i2c_smbus_read_byte(struct i2c_client * client); 77extern s32 i2c_smbus_read_byte(struct i2c_client *client);
78extern s32 i2c_smbus_write_byte(struct i2c_client * client, u8 value); 78extern s32 i2c_smbus_write_byte(struct i2c_client *client, u8 value);
79extern s32 i2c_smbus_read_byte_data(struct i2c_client * client, u8 command); 79extern s32 i2c_smbus_read_byte_data(struct i2c_client *client, u8 command);
80extern s32 i2c_smbus_write_byte_data(struct i2c_client * client, 80extern s32 i2c_smbus_write_byte_data(struct i2c_client *client,
81 u8 command, u8 value); 81 u8 command, u8 value);
82extern s32 i2c_smbus_read_word_data(struct i2c_client * client, u8 command); 82extern s32 i2c_smbus_read_word_data(struct i2c_client *client, u8 command);
83extern s32 i2c_smbus_write_word_data(struct i2c_client * client, 83extern s32 i2c_smbus_write_word_data(struct i2c_client *client,
84 u8 command, u16 value); 84 u8 command, u16 value);
85/* Returns the number of read bytes */ 85/* Returns the number of read bytes */
86extern s32 i2c_smbus_read_block_data(struct i2c_client *client, 86extern s32 i2c_smbus_read_block_data(struct i2c_client *client,
87 u8 command, u8 *values); 87 u8 command, u8 *values);
88extern s32 i2c_smbus_write_block_data(struct i2c_client * client, 88extern s32 i2c_smbus_write_block_data(struct i2c_client *client,
89 u8 command, u8 length, 89 u8 command, u8 length, const u8 *values);
90 const u8 *values);
91/* Returns the number of read bytes */ 90/* Returns the number of read bytes */
92extern s32 i2c_smbus_read_i2c_block_data(struct i2c_client * client, 91extern s32 i2c_smbus_read_i2c_block_data(struct i2c_client *client,
93 u8 command, u8 length, u8 *values); 92 u8 command, u8 length, u8 *values);
94extern s32 i2c_smbus_write_i2c_block_data(struct i2c_client * client, 93extern s32 i2c_smbus_write_i2c_block_data(struct i2c_client *client,
95 u8 command, u8 length, 94 u8 command, u8 length,
96 const u8 *values); 95 const u8 *values);
97 96
@@ -169,7 +168,7 @@ struct i2c_driver {
169 /* a ioctl like command that can be used to perform specific functions 168 /* a ioctl like command that can be used to perform specific functions
170 * with the device. 169 * with the device.
171 */ 170 */
172 int (*command)(struct i2c_client *client,unsigned int cmd, void *arg); 171 int (*command)(struct i2c_client *client, unsigned int cmd, void *arg);
173 172
174 struct device_driver driver; 173 struct device_driver driver;
175 const struct i2c_device_id *id_table; 174 const struct i2c_device_id *id_table;
@@ -224,14 +223,14 @@ static inline struct i2c_client *kobj_to_i2c_client(struct kobject *kobj)
224 return to_i2c_client(dev); 223 return to_i2c_client(dev);
225} 224}
226 225
227static inline void *i2c_get_clientdata (struct i2c_client *dev) 226static inline void *i2c_get_clientdata(const struct i2c_client *dev)
228{ 227{
229 return dev_get_drvdata (&dev->dev); 228 return dev_get_drvdata(&dev->dev);
230} 229}
231 230
232static inline void i2c_set_clientdata (struct i2c_client *dev, void *data) 231static inline void i2c_set_clientdata(struct i2c_client *dev, void *data)
233{ 232{
234 dev_set_drvdata (&dev->dev, data); 233 dev_set_drvdata(&dev->dev, data);
235} 234}
236 235
237/** 236/**
@@ -240,6 +239,7 @@ static inline void i2c_set_clientdata (struct i2c_client *dev, void *data)
240 * @flags: to initialize i2c_client.flags 239 * @flags: to initialize i2c_client.flags
241 * @addr: stored in i2c_client.addr 240 * @addr: stored in i2c_client.addr
242 * @platform_data: stored in i2c_client.dev.platform_data 241 * @platform_data: stored in i2c_client.dev.platform_data
242 * @archdata: copied into i2c_client.dev.archdata
243 * @irq: stored in i2c_client.irq 243 * @irq: stored in i2c_client.irq
244 * 244 *
245 * I2C doesn't actually support hardware probing, although controllers and 245 * I2C doesn't actually support hardware probing, although controllers and
@@ -259,6 +259,7 @@ struct i2c_board_info {
259 unsigned short flags; 259 unsigned short flags;
260 unsigned short addr; 260 unsigned short addr;
261 void *platform_data; 261 void *platform_data;
262 struct dev_archdata *archdata;
262 int irq; 263 int irq;
263}; 264};
264 265
@@ -272,7 +273,7 @@ struct i2c_board_info {
272 * fields (such as associated irq, or device-specific platform_data) 273 * fields (such as associated irq, or device-specific platform_data)
273 * are provided using conventional syntax. 274 * are provided using conventional syntax.
274 */ 275 */
275#define I2C_BOARD_INFO(dev_type,dev_addr) \ 276#define I2C_BOARD_INFO(dev_type, dev_addr) \
276 .type = (dev_type), .addr = (dev_addr) 277 .type = (dev_type), .addr = (dev_addr)
277 278
278 279
@@ -306,10 +307,12 @@ extern void i2c_unregister_device(struct i2c_client *);
306 */ 307 */
307#ifdef CONFIG_I2C_BOARDINFO 308#ifdef CONFIG_I2C_BOARDINFO
308extern int 309extern int
309i2c_register_board_info(int busnum, struct i2c_board_info const *info, unsigned n); 310i2c_register_board_info(int busnum, struct i2c_board_info const *info,
311 unsigned n);
310#else 312#else
311static inline int 313static inline int
312i2c_register_board_info(int busnum, struct i2c_board_info const *info, unsigned n) 314i2c_register_board_info(int busnum, struct i2c_board_info const *info,
315 unsigned n)
313{ 316{
314 return 0; 317 return 0;
315} 318}
@@ -328,11 +331,11 @@ struct i2c_algorithm {
328 using common I2C messages */ 331 using common I2C messages */
329 /* master_xfer should return the number of messages successfully 332 /* master_xfer should return the number of messages successfully
330 processed, or a negative value on error */ 333 processed, or a negative value on error */
331 int (*master_xfer)(struct i2c_adapter *adap,struct i2c_msg *msgs, 334 int (*master_xfer)(struct i2c_adapter *adap, struct i2c_msg *msgs,
332 int num); 335 int num);
333 int (*smbus_xfer) (struct i2c_adapter *adap, u16 addr, 336 int (*smbus_xfer) (struct i2c_adapter *adap, u16 addr,
334 unsigned short flags, char read_write, 337 unsigned short flags, char read_write,
335 u8 command, int size, union i2c_smbus_data * data); 338 u8 command, int size, union i2c_smbus_data *data);
336 339
337 /* To determine what the adapter supports */ 340 /* To determine what the adapter supports */
338 u32 (*functionality) (struct i2c_adapter *); 341 u32 (*functionality) (struct i2c_adapter *);
@@ -345,7 +348,7 @@ struct i2c_algorithm {
345struct i2c_adapter { 348struct i2c_adapter {
346 struct module *owner; 349 struct module *owner;
347 unsigned int id; 350 unsigned int id;
348 unsigned int class; 351 unsigned int class; /* classes to allow probing for */
349 const struct i2c_algorithm *algo; /* the algorithm to access the bus */ 352 const struct i2c_algorithm *algo; /* the algorithm to access the bus */
350 void *algo_data; 353 void *algo_data;
351 354
@@ -369,14 +372,14 @@ struct i2c_adapter {
369}; 372};
370#define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev) 373#define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev)
371 374
372static inline void *i2c_get_adapdata (struct i2c_adapter *dev) 375static inline void *i2c_get_adapdata(const struct i2c_adapter *dev)
373{ 376{
374 return dev_get_drvdata (&dev->dev); 377 return dev_get_drvdata(&dev->dev);
375} 378}
376 379
377static inline void i2c_set_adapdata (struct i2c_adapter *dev, void *data) 380static inline void i2c_set_adapdata(struct i2c_adapter *dev, void *data)
378{ 381{
379 dev_set_drvdata (&dev->dev, data); 382 dev_set_drvdata(&dev->dev, data);
380} 383}
381 384
382/*flags for the client struct: */ 385/*flags for the client struct: */
@@ -449,7 +452,7 @@ extern int i2c_probe(struct i2c_adapter *adapter,
449 const struct i2c_client_address_data *address_data, 452 const struct i2c_client_address_data *address_data,
450 int (*found_proc) (struct i2c_adapter *, int, int)); 453 int (*found_proc) (struct i2c_adapter *, int, int));
451 454
452extern struct i2c_adapter* i2c_get_adapter(int id); 455extern struct i2c_adapter *i2c_get_adapter(int id);
453extern void i2c_put_adapter(struct i2c_adapter *adap); 456extern void i2c_put_adapter(struct i2c_adapter *adap);
454 457
455 458
@@ -465,7 +468,7 @@ static inline int i2c_check_functionality(struct i2c_adapter *adap, u32 func)
465 return (func & i2c_get_functionality(adap)) == func; 468 return (func & i2c_get_functionality(adap)) == func;
466} 469}
467 470
468/* Return id number for a specific adapter */ 471/* Return the adapter number for a specific adapter */
469static inline int i2c_adapter_id(struct i2c_adapter *adap) 472static inline int i2c_adapter_id(struct i2c_adapter *adap)
470{ 473{
471 return adap->nr; 474 return adap->nr;
@@ -526,7 +529,7 @@ struct i2c_msg {
526 529
527#define I2C_FUNC_I2C 0x00000001 530#define I2C_FUNC_I2C 0x00000001
528#define I2C_FUNC_10BIT_ADDR 0x00000002 531#define I2C_FUNC_10BIT_ADDR 0x00000002
529#define I2C_FUNC_PROTOCOL_MANGLING 0x00000004 /* I2C_M_{REV_DIR_ADDR,NOSTART,..} */ 532#define I2C_FUNC_PROTOCOL_MANGLING 0x00000004 /* I2C_M_NOSTART etc. */
530#define I2C_FUNC_SMBUS_PEC 0x00000008 533#define I2C_FUNC_SMBUS_PEC 0x00000008
531#define I2C_FUNC_SMBUS_BLOCK_PROC_CALL 0x00008000 /* SMBus 2.0 */ 534#define I2C_FUNC_SMBUS_BLOCK_PROC_CALL 0x00008000 /* SMBus 2.0 */
532#define I2C_FUNC_SMBUS_QUICK 0x00010000 535#define I2C_FUNC_SMBUS_QUICK 0x00010000
@@ -541,30 +544,26 @@ struct i2c_msg {
541#define I2C_FUNC_SMBUS_WRITE_BLOCK_DATA 0x02000000 544#define I2C_FUNC_SMBUS_WRITE_BLOCK_DATA 0x02000000
542#define I2C_FUNC_SMBUS_READ_I2C_BLOCK 0x04000000 /* I2C-like block xfer */ 545#define I2C_FUNC_SMBUS_READ_I2C_BLOCK 0x04000000 /* I2C-like block xfer */
543#define I2C_FUNC_SMBUS_WRITE_I2C_BLOCK 0x08000000 /* w/ 1-byte reg. addr. */ 546#define I2C_FUNC_SMBUS_WRITE_I2C_BLOCK 0x08000000 /* w/ 1-byte reg. addr. */
544#define I2C_FUNC_SMBUS_READ_I2C_BLOCK_2 0x10000000 /* I2C-like block xfer */ 547
545#define I2C_FUNC_SMBUS_WRITE_I2C_BLOCK_2 0x20000000 /* w/ 2-byte reg. addr. */ 548#define I2C_FUNC_SMBUS_BYTE (I2C_FUNC_SMBUS_READ_BYTE | \
546 549 I2C_FUNC_SMBUS_WRITE_BYTE)
547#define I2C_FUNC_SMBUS_BYTE (I2C_FUNC_SMBUS_READ_BYTE | \ 550#define I2C_FUNC_SMBUS_BYTE_DATA (I2C_FUNC_SMBUS_READ_BYTE_DATA | \
548 I2C_FUNC_SMBUS_WRITE_BYTE) 551 I2C_FUNC_SMBUS_WRITE_BYTE_DATA)
549#define I2C_FUNC_SMBUS_BYTE_DATA (I2C_FUNC_SMBUS_READ_BYTE_DATA | \ 552#define I2C_FUNC_SMBUS_WORD_DATA (I2C_FUNC_SMBUS_READ_WORD_DATA | \
550 I2C_FUNC_SMBUS_WRITE_BYTE_DATA) 553 I2C_FUNC_SMBUS_WRITE_WORD_DATA)
551#define I2C_FUNC_SMBUS_WORD_DATA (I2C_FUNC_SMBUS_READ_WORD_DATA | \ 554#define I2C_FUNC_SMBUS_BLOCK_DATA (I2C_FUNC_SMBUS_READ_BLOCK_DATA | \
552 I2C_FUNC_SMBUS_WRITE_WORD_DATA) 555 I2C_FUNC_SMBUS_WRITE_BLOCK_DATA)
553#define I2C_FUNC_SMBUS_BLOCK_DATA (I2C_FUNC_SMBUS_READ_BLOCK_DATA | \ 556#define I2C_FUNC_SMBUS_I2C_BLOCK (I2C_FUNC_SMBUS_READ_I2C_BLOCK | \
554 I2C_FUNC_SMBUS_WRITE_BLOCK_DATA) 557 I2C_FUNC_SMBUS_WRITE_I2C_BLOCK)
555#define I2C_FUNC_SMBUS_I2C_BLOCK (I2C_FUNC_SMBUS_READ_I2C_BLOCK | \ 558
556 I2C_FUNC_SMBUS_WRITE_I2C_BLOCK) 559#define I2C_FUNC_SMBUS_EMUL (I2C_FUNC_SMBUS_QUICK | \
557#define I2C_FUNC_SMBUS_I2C_BLOCK_2 (I2C_FUNC_SMBUS_READ_I2C_BLOCK_2 | \ 560 I2C_FUNC_SMBUS_BYTE | \
558 I2C_FUNC_SMBUS_WRITE_I2C_BLOCK_2) 561 I2C_FUNC_SMBUS_BYTE_DATA | \
559 562 I2C_FUNC_SMBUS_WORD_DATA | \
560#define I2C_FUNC_SMBUS_EMUL (I2C_FUNC_SMBUS_QUICK | \ 563 I2C_FUNC_SMBUS_PROC_CALL | \
561 I2C_FUNC_SMBUS_BYTE | \ 564 I2C_FUNC_SMBUS_WRITE_BLOCK_DATA | \
562 I2C_FUNC_SMBUS_BYTE_DATA | \ 565 I2C_FUNC_SMBUS_I2C_BLOCK | \
563 I2C_FUNC_SMBUS_WORD_DATA | \ 566 I2C_FUNC_SMBUS_PEC)
564 I2C_FUNC_SMBUS_PROC_CALL | \
565 I2C_FUNC_SMBUS_WRITE_BLOCK_DATA | \
566 I2C_FUNC_SMBUS_I2C_BLOCK | \
567 I2C_FUNC_SMBUS_PEC)
568 567
569/* 568/*
570 * Data for SMBus Messages 569 * Data for SMBus Messages
@@ -574,7 +573,7 @@ union i2c_smbus_data {
574 __u8 byte; 573 __u8 byte;
575 __u16 word; 574 __u16 word;
576 __u8 block[I2C_SMBUS_BLOCK_MAX + 2]; /* block[0] is used for length */ 575 __u8 block[I2C_SMBUS_BLOCK_MAX + 2]; /* block[0] is used for length */
577 /* and one more for user-space compatibility */ 576 /* and one more for user-space compatibility */
578}; 577};
579 578
580/* i2c_smbus_xfer read or write markers */ 579/* i2c_smbus_xfer read or write markers */
@@ -602,21 +601,21 @@ union i2c_smbus_data {
602 601
603/* Default fill of many variables */ 602/* Default fill of many variables */
604#define I2C_CLIENT_DEFAULTS {I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ 603#define I2C_CLIENT_DEFAULTS {I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
605 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ 604 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
606 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ 605 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
607 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ 606 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
608 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ 607 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
609 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ 608 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
610 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ 609 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
611 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ 610 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
612 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ 611 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
613 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ 612 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
614 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ 613 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
615 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ 614 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
616 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ 615 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
617 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ 616 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
618 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ 617 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
619 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END} 618 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END}
620 619
621/* I2C_CLIENT_MODULE_PARM creates a module parameter, and puts it in the 620/* I2C_CLIENT_MODULE_PARM creates a module parameter, and puts it in the
622 module header */ 621 module header */
@@ -625,7 +624,7 @@ union i2c_smbus_data {
625 static unsigned short var[I2C_CLIENT_MAX_OPTS] = I2C_CLIENT_DEFAULTS; \ 624 static unsigned short var[I2C_CLIENT_MAX_OPTS] = I2C_CLIENT_DEFAULTS; \
626 static unsigned int var##_num; \ 625 static unsigned int var##_num; \
627 module_param_array(var, short, &var##_num, 0); \ 626 module_param_array(var, short, &var##_num, 0); \
628 MODULE_PARM_DESC(var,desc) 627 MODULE_PARM_DESC(var, desc)
629 628
630#define I2C_CLIENT_MODULE_PARM_FORCE(name) \ 629#define I2C_CLIENT_MODULE_PARM_FORCE(name) \
631I2C_CLIENT_MODULE_PARM(force_##name, \ 630I2C_CLIENT_MODULE_PARM(force_##name, \
diff --git a/include/linux/i2c/twl4030.h b/include/linux/i2c/twl4030.h
index cdb453162a97..fb604dcd38f1 100644
--- a/include/linux/i2c/twl4030.h
+++ b/include/linux/i2c/twl4030.h
@@ -228,6 +228,12 @@ struct twl4030_gpio_platform_data {
228 int gpio_base; 228 int gpio_base;
229 unsigned irq_base, irq_end; 229 unsigned irq_base, irq_end;
230 230
231 /* package the two LED signals as output-only GPIOs? */
232 bool use_leds;
233
234 /* gpio-n should control VMMC(n+1) if BIT(n) in mmc_cd is set */
235 u8 mmc_cd;
236
231 /* For gpio-N, bit (1 << N) in "pullups" is set if that pullup 237 /* For gpio-N, bit (1 << N) in "pullups" is set if that pullup
232 * should be enabled. Else, if that bit is set in "pulldowns", 238 * should be enabled. Else, if that bit is set in "pulldowns",
233 * that pulldown is enabled. Don't waste power by letting any 239 * that pulldown is enabled. Don't waste power by letting any
@@ -277,6 +283,8 @@ struct twl4030_platform_data {
277 283
278/*----------------------------------------------------------------------*/ 284/*----------------------------------------------------------------------*/
279 285
286int twl4030_sih_setup(int module);
287
280/* 288/*
281 * FIXME completely stop using TWL4030_IRQ_BASE ... instead, pass the 289 * FIXME completely stop using TWL4030_IRQ_BASE ... instead, pass the
282 * IRQ data to subsidiary devices using platform device resources. 290 * IRQ data to subsidiary devices using platform device resources.
@@ -291,16 +299,16 @@ struct twl4030_platform_data {
291#define TWL4030_MODIRQ_BCI (TWL4030_IRQ_BASE + 2) 299#define TWL4030_MODIRQ_BCI (TWL4030_IRQ_BASE + 2)
292#define TWL4030_MODIRQ_MADC (TWL4030_IRQ_BASE + 3) 300#define TWL4030_MODIRQ_MADC (TWL4030_IRQ_BASE + 3)
293/* #define TWL4030_MODIRQ_USB (TWL4030_IRQ_BASE + 4) */ 301/* #define TWL4030_MODIRQ_USB (TWL4030_IRQ_BASE + 4) */
294#define TWL4030_MODIRQ_PWR (TWL4030_IRQ_BASE + 5) 302/* #define TWL4030_MODIRQ_PWR (TWL4030_IRQ_BASE + 5) */
295 303
296#define TWL4030_PWRIRQ_PWRBTN (TWL4030_PWR_IRQ_BASE + 0) 304#define TWL4030_PWRIRQ_PWRBTN (TWL4030_PWR_IRQ_BASE + 0)
297#define TWL4030_PWRIRQ_CHG_PRES (TWL4030_PWR_IRQ_BASE + 1) 305/* #define TWL4030_PWRIRQ_CHG_PRES (TWL4030_PWR_IRQ_BASE + 1) */
298#define TWL4030_PWRIRQ_USB_PRES (TWL4030_PWR_IRQ_BASE + 2) 306/* #define TWL4030_PWRIRQ_USB_PRES (TWL4030_PWR_IRQ_BASE + 2) */
299#define TWL4030_PWRIRQ_RTC (TWL4030_PWR_IRQ_BASE + 3) 307/* #define TWL4030_PWRIRQ_RTC (TWL4030_PWR_IRQ_BASE + 3) */
300#define TWL4030_PWRIRQ_HOT_DIE (TWL4030_PWR_IRQ_BASE + 4) 308/* #define TWL4030_PWRIRQ_HOT_DIE (TWL4030_PWR_IRQ_BASE + 4) */
301#define TWL4030_PWRIRQ_PWROK_TIMEOUT (TWL4030_PWR_IRQ_BASE + 5) 309/* #define TWL4030_PWRIRQ_PWROK_TIMEOUT (TWL4030_PWR_IRQ_BASE + 5) */
302#define TWL4030_PWRIRQ_MBCHG (TWL4030_PWR_IRQ_BASE + 6) 310/* #define TWL4030_PWRIRQ_MBCHG (TWL4030_PWR_IRQ_BASE + 6) */
303#define TWL4030_PWRIRQ_SC_DETECT (TWL4030_PWR_IRQ_BASE + 7) 311/* #define TWL4030_PWRIRQ_SC_DETECT (TWL4030_PWR_IRQ_BASE + 7) */
304 312
305/* Rest are unsued currently*/ 313/* Rest are unsued currently*/
306 314
@@ -317,17 +325,13 @@ struct twl4030_platform_data {
317/* TWL4030 GPIO interrupt definitions */ 325/* TWL4030 GPIO interrupt definitions */
318 326
319#define TWL4030_GPIO_IRQ_NO(n) (TWL4030_GPIO_IRQ_BASE + (n)) 327#define TWL4030_GPIO_IRQ_NO(n) (TWL4030_GPIO_IRQ_BASE + (n))
320#define TWL4030_GPIO_IS_ENABLE 1
321 328
322/* 329/*
323 * Exported TWL4030 GPIO APIs 330 * Exported TWL4030 GPIO APIs
324 * 331 *
325 * WARNING -- use standard GPIO and IRQ calls instead; these will vanish. 332 * WARNING -- use standard GPIO and IRQ calls instead; these will vanish.
326 */ 333 */
327int twl4030_get_gpio_datain(int gpio);
328int twl4030_request_gpio(int gpio);
329int twl4030_set_gpio_debounce(int gpio, int enable); 334int twl4030_set_gpio_debounce(int gpio, int enable);
330int twl4030_free_gpio(int gpio);
331 335
332#if defined(CONFIG_TWL4030_BCI_BATTERY) || \ 336#if defined(CONFIG_TWL4030_BCI_BATTERY) || \
333 defined(CONFIG_TWL4030_BCI_BATTERY_MODULE) 337 defined(CONFIG_TWL4030_BCI_BATTERY_MODULE)
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 2e117f30a76c..3d017cfd245b 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -29,6 +29,7 @@
29#include <linux/io.h> 29#include <linux/io.h>
30#include <linux/dma_remapping.h> 30#include <linux/dma_remapping.h>
31#include <asm/cacheflush.h> 31#include <asm/cacheflush.h>
32#include <asm/iommu.h>
32 33
33/* 34/*
34 * Intel IOMMU register specification per version 1.0 public spec. 35 * Intel IOMMU register specification per version 1.0 public spec.
@@ -127,6 +128,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
127 128
128 129
129/* IOTLB_REG */ 130/* IOTLB_REG */
131#define DMA_TLB_FLUSH_GRANU_OFFSET 60
130#define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60) 132#define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60)
131#define DMA_TLB_DSI_FLUSH (((u64)2) << 60) 133#define DMA_TLB_DSI_FLUSH (((u64)2) << 60)
132#define DMA_TLB_PSI_FLUSH (((u64)3) << 60) 134#define DMA_TLB_PSI_FLUSH (((u64)3) << 60)
@@ -140,6 +142,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
140#define DMA_TLB_MAX_SIZE (0x3f) 142#define DMA_TLB_MAX_SIZE (0x3f)
141 143
142/* INVALID_DESC */ 144/* INVALID_DESC */
145#define DMA_CCMD_INVL_GRANU_OFFSET 61
143#define DMA_ID_TLB_GLOBAL_FLUSH (((u64)1) << 3) 146#define DMA_ID_TLB_GLOBAL_FLUSH (((u64)1) << 3)
144#define DMA_ID_TLB_DSI_FLUSH (((u64)2) << 3) 147#define DMA_ID_TLB_DSI_FLUSH (((u64)2) << 3)
145#define DMA_ID_TLB_PSI_FLUSH (((u64)3) << 3) 148#define DMA_ID_TLB_PSI_FLUSH (((u64)3) << 3)
@@ -200,22 +203,21 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
200#define dma_frcd_type(d) ((d >> 30) & 1) 203#define dma_frcd_type(d) ((d >> 30) & 1)
201#define dma_frcd_fault_reason(c) (c & 0xff) 204#define dma_frcd_fault_reason(c) (c & 0xff)
202#define dma_frcd_source_id(c) (c & 0xffff) 205#define dma_frcd_source_id(c) (c & 0xffff)
203#define dma_frcd_page_addr(d) (d & (((u64)-1) << 12)) /* low 64 bit */ 206/* low 64 bit */
204 207#define dma_frcd_page_addr(d) (d & (((u64)-1) << PAGE_SHIFT))
205#define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000) /* 10sec */ 208
206 209#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \
207#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \ 210do { \
208{\ 211 cycles_t start_time = get_cycles(); \
209 cycles_t start_time = get_cycles();\ 212 while (1) { \
210 while (1) {\ 213 sts = op(iommu->reg + offset); \
211 sts = op (iommu->reg + offset);\ 214 if (cond) \
212 if (cond)\ 215 break; \
213 break;\
214 if (DMAR_OPERATION_TIMEOUT < (get_cycles() - start_time))\ 216 if (DMAR_OPERATION_TIMEOUT < (get_cycles() - start_time))\
215 panic("DMAR hardware is malfunctioning\n");\ 217 panic("DMAR hardware is malfunctioning\n"); \
216 cpu_relax();\ 218 cpu_relax(); \
217 }\ 219 } \
218} 220} while (0)
219 221
220#define QI_LENGTH 256 /* queue length */ 222#define QI_LENGTH 256 /* queue length */
221 223
@@ -238,6 +240,19 @@ enum {
238#define QI_IWD_STATUS_DATA(d) (((u64)d) << 32) 240#define QI_IWD_STATUS_DATA(d) (((u64)d) << 32)
239#define QI_IWD_STATUS_WRITE (((u64)1) << 5) 241#define QI_IWD_STATUS_WRITE (((u64)1) << 5)
240 242
243#define QI_IOTLB_DID(did) (((u64)did) << 16)
244#define QI_IOTLB_DR(dr) (((u64)dr) << 7)
245#define QI_IOTLB_DW(dw) (((u64)dw) << 6)
246#define QI_IOTLB_GRAN(gran) (((u64)gran) >> (DMA_TLB_FLUSH_GRANU_OFFSET-4))
247#define QI_IOTLB_ADDR(addr) (((u64)addr) & VTD_PAGE_MASK)
248#define QI_IOTLB_IH(ih) (((u64)ih) << 6)
249#define QI_IOTLB_AM(am) (((u8)am))
250
251#define QI_CC_FM(fm) (((u64)fm) << 48)
252#define QI_CC_SID(sid) (((u64)sid) << 32)
253#define QI_CC_DID(did) (((u64)did) << 16)
254#define QI_CC_GRAN(gran) (((u64)gran) >> (DMA_CCMD_INVL_GRANU_OFFSET-4))
255
241struct qi_desc { 256struct qi_desc {
242 u64 low, high; 257 u64 low, high;
243}; 258};
@@ -263,6 +278,13 @@ struct ir_table {
263}; 278};
264#endif 279#endif
265 280
281struct iommu_flush {
282 int (*flush_context)(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
283 u64 type, int non_present_entry_flush);
284 int (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
285 unsigned int size_order, u64 type, int non_present_entry_flush);
286};
287
266struct intel_iommu { 288struct intel_iommu {
267 void __iomem *reg; /* Pointer to hardware regs, virtual addr */ 289 void __iomem *reg; /* Pointer to hardware regs, virtual addr */
268 u64 cap; 290 u64 cap;
@@ -282,6 +304,7 @@ struct intel_iommu {
282 unsigned char name[7]; /* Device Name */ 304 unsigned char name[7]; /* Device Name */
283 struct msi_msg saved_msg; 305 struct msi_msg saved_msg;
284 struct sys_device sysdev; 306 struct sys_device sysdev;
307 struct iommu_flush flush;
285#endif 308#endif
286 struct q_inval *qi; /* Queued invalidation info */ 309 struct q_inval *qi; /* Queued invalidation info */
287#ifdef CONFIG_INTR_REMAP 310#ifdef CONFIG_INTR_REMAP
@@ -303,6 +326,12 @@ extern void free_iommu(struct intel_iommu *iommu);
303extern int dmar_enable_qi(struct intel_iommu *iommu); 326extern int dmar_enable_qi(struct intel_iommu *iommu);
304extern void qi_global_iec(struct intel_iommu *iommu); 327extern void qi_global_iec(struct intel_iommu *iommu);
305 328
329extern int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
330 u8 fm, u64 type, int non_present_entry_flush);
331extern int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
332 unsigned int size_order, u64 type,
333 int non_present_entry_flush);
334
306extern void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu); 335extern void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
307 336
308void intel_iommu_domain_exit(struct dmar_domain *domain); 337void intel_iommu_domain_exit(struct dmar_domain *domain);
@@ -324,4 +353,11 @@ static inline int intel_iommu_found(void)
324} 353}
325#endif /* CONFIG_DMAR */ 354#endif /* CONFIG_DMAR */
326 355
356extern void *intel_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
357extern void intel_free_coherent(struct device *, size_t, void *, dma_addr_t);
358extern dma_addr_t intel_map_single(struct device *, phys_addr_t, size_t, int);
359extern void intel_unmap_single(struct device *, dma_addr_t, size_t, int);
360extern int intel_map_sg(struct device *, struct scatterlist *, int, int);
361extern void intel_unmap_sg(struct device *, struct scatterlist *, int, int);
362
327#endif 363#endif
diff --git a/include/linux/irqnr.h b/include/linux/irqnr.h
index 3171ddc3b39d..452c280c8115 100644
--- a/include/linux/irqnr.h
+++ b/include/linux/irqnr.h
@@ -13,9 +13,9 @@ extern int nr_irqs;
13# define for_each_irq_desc(irq, desc) \ 13# define for_each_irq_desc(irq, desc) \
14 for (irq = 0, desc = irq_desc; irq < nr_irqs; irq++, desc++) 14 for (irq = 0, desc = irq_desc; irq < nr_irqs; irq++, desc++)
15 15
16# define for_each_irq_desc_reverse(irq, desc) \ 16# define for_each_irq_desc_reverse(irq, desc) \
17 for (irq = nr_irqs -1, desc = irq_desc + (nr_irqs -1 ); \ 17 for (irq = nr_irqs - 1, desc = irq_desc + (nr_irqs - 1); \
18 irq > 0; irq--, desc--) 18 irq >= 0; irq--, desc--)
19#endif 19#endif
20 20
21#define for_each_irq_nr(irq) \ 21#define for_each_irq_nr(irq) \
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index 35d4f6342fac..346e2b80be7d 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -911,7 +911,7 @@ extern int journal_set_features
911 (journal_t *, unsigned long, unsigned long, unsigned long); 911 (journal_t *, unsigned long, unsigned long, unsigned long);
912extern int journal_create (journal_t *); 912extern int journal_create (journal_t *);
913extern int journal_load (journal_t *journal); 913extern int journal_load (journal_t *journal);
914extern void journal_destroy (journal_t *); 914extern int journal_destroy (journal_t *);
915extern int journal_recover (journal_t *journal); 915extern int journal_recover (journal_t *journal);
916extern int journal_wipe (journal_t *, int); 916extern int journal_wipe (journal_t *, int);
917extern int journal_skip_recovery (journal_t *); 917extern int journal_skip_recovery (journal_t *);
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 947cf84e555d..c261aa0584b1 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -340,6 +340,9 @@ enum {
340 340
341 ATA_EHI_DID_RESET = ATA_EHI_DID_SOFTRESET | ATA_EHI_DID_HARDRESET, 341 ATA_EHI_DID_RESET = ATA_EHI_DID_SOFTRESET | ATA_EHI_DID_HARDRESET,
342 342
343 /* mask of flags to transfer *to* the slave link */
344 ATA_EHI_TO_SLAVE_MASK = ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET,
345
343 /* max tries if error condition is still set after ->error_handler */ 346 /* max tries if error condition is still set after ->error_handler */
344 ATA_EH_MAX_TRIES = 5, 347 ATA_EH_MAX_TRIES = 5,
345 348
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h
index 77323a72dd3c..cf9c679ab38b 100644
--- a/include/linux/mlx4/cmd.h
+++ b/include/linux/mlx4/cmd.h
@@ -132,6 +132,15 @@ enum {
132 MLX4_MAILBOX_SIZE = 4096 132 MLX4_MAILBOX_SIZE = 4096
133}; 133};
134 134
135enum {
136 /* set port opcode modifiers */
137 MLX4_SET_PORT_GENERAL = 0x0,
138 MLX4_SET_PORT_RQP_CALC = 0x1,
139 MLX4_SET_PORT_MAC_TABLE = 0x2,
140 MLX4_SET_PORT_VLAN_TABLE = 0x3,
141 MLX4_SET_PORT_PRIO_MAP = 0x4,
142};
143
135struct mlx4_dev; 144struct mlx4_dev;
136 145
137struct mlx4_cmd_mailbox { 146struct mlx4_cmd_mailbox {
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index b2f944468313..bd9977b89490 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -60,6 +60,7 @@ enum {
60 MLX4_DEV_CAP_FLAG_IPOIB_CSUM = 1 << 7, 60 MLX4_DEV_CAP_FLAG_IPOIB_CSUM = 1 << 7,
61 MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1 << 8, 61 MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1 << 8,
62 MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1 << 9, 62 MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1 << 9,
63 MLX4_DEV_CAP_FLAG_DPDP = 1 << 12,
63 MLX4_DEV_CAP_FLAG_MEM_WINDOW = 1 << 16, 64 MLX4_DEV_CAP_FLAG_MEM_WINDOW = 1 << 16,
64 MLX4_DEV_CAP_FLAG_APM = 1 << 17, 65 MLX4_DEV_CAP_FLAG_APM = 1 << 17,
65 MLX4_DEV_CAP_FLAG_ATOMIC = 1 << 18, 66 MLX4_DEV_CAP_FLAG_ATOMIC = 1 << 18,
@@ -145,6 +146,29 @@ enum {
145 MLX4_MTT_FLAG_PRESENT = 1 146 MLX4_MTT_FLAG_PRESENT = 1
146}; 147};
147 148
149enum mlx4_qp_region {
150 MLX4_QP_REGION_FW = 0,
151 MLX4_QP_REGION_ETH_ADDR,
152 MLX4_QP_REGION_FC_ADDR,
153 MLX4_QP_REGION_FC_EXCH,
154 MLX4_NUM_QP_REGION
155};
156
157enum mlx4_port_type {
158 MLX4_PORT_TYPE_IB = 1 << 0,
159 MLX4_PORT_TYPE_ETH = 1 << 1,
160};
161
162enum mlx4_special_vlan_idx {
163 MLX4_NO_VLAN_IDX = 0,
164 MLX4_VLAN_MISS_IDX,
165 MLX4_VLAN_REGULAR
166};
167
168enum {
169 MLX4_NUM_FEXCH = 64 * 1024,
170};
171
148static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor) 172static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor)
149{ 173{
150 return (major << 32) | (minor << 16) | subminor; 174 return (major << 32) | (minor << 16) | subminor;
@@ -154,7 +178,9 @@ struct mlx4_caps {
154 u64 fw_ver; 178 u64 fw_ver;
155 int num_ports; 179 int num_ports;
156 int vl_cap[MLX4_MAX_PORTS + 1]; 180 int vl_cap[MLX4_MAX_PORTS + 1];
157 int mtu_cap[MLX4_MAX_PORTS + 1]; 181 int ib_mtu_cap[MLX4_MAX_PORTS + 1];
182 u64 def_mac[MLX4_MAX_PORTS + 1];
183 int eth_mtu_cap[MLX4_MAX_PORTS + 1];
158 int gid_table_len[MLX4_MAX_PORTS + 1]; 184 int gid_table_len[MLX4_MAX_PORTS + 1];
159 int pkey_table_len[MLX4_MAX_PORTS + 1]; 185 int pkey_table_len[MLX4_MAX_PORTS + 1];
160 int local_ca_ack_delay; 186 int local_ca_ack_delay;
@@ -169,7 +195,6 @@ struct mlx4_caps {
169 int max_rq_desc_sz; 195 int max_rq_desc_sz;
170 int max_qp_init_rdma; 196 int max_qp_init_rdma;
171 int max_qp_dest_rdma; 197 int max_qp_dest_rdma;
172 int reserved_qps;
173 int sqp_start; 198 int sqp_start;
174 int num_srqs; 199 int num_srqs;
175 int max_srq_wqes; 200 int max_srq_wqes;
@@ -201,6 +226,15 @@ struct mlx4_caps {
201 u16 stat_rate_support; 226 u16 stat_rate_support;
202 u8 port_width_cap[MLX4_MAX_PORTS + 1]; 227 u8 port_width_cap[MLX4_MAX_PORTS + 1];
203 int max_gso_sz; 228 int max_gso_sz;
229 int reserved_qps_cnt[MLX4_NUM_QP_REGION];
230 int reserved_qps;
231 int reserved_qps_base[MLX4_NUM_QP_REGION];
232 int log_num_macs;
233 int log_num_vlans;
234 int log_num_prios;
235 enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1];
236 u8 supported_type[MLX4_MAX_PORTS + 1];
237 u32 port_mask;
204}; 238};
205 239
206struct mlx4_buf_list { 240struct mlx4_buf_list {
@@ -355,6 +389,11 @@ struct mlx4_init_port_param {
355 u64 si_guid; 389 u64 si_guid;
356}; 390};
357 391
392#define mlx4_foreach_port(port, dev, type) \
393 for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \
394 if (((type) == MLX4_PORT_TYPE_IB ? (dev)->caps.port_mask : \
395 ~(dev)->caps.port_mask) & 1 << ((port) - 1))
396
358int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, 397int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
359 struct mlx4_buf *buf); 398 struct mlx4_buf *buf);
360void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf); 399void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf);
@@ -400,7 +439,10 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
400 int collapsed); 439 int collapsed);
401void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq); 440void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq);
402 441
403int mlx4_qp_alloc(struct mlx4_dev *dev, int sqpn, struct mlx4_qp *qp); 442int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base);
443void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt);
444
445int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp);
404void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp); 446void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp);
405 447
406int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, struct mlx4_mtt *mtt, 448int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, struct mlx4_mtt *mtt,
@@ -416,6 +458,12 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
416 int block_mcast_loopback); 458 int block_mcast_loopback);
417int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16]); 459int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16]);
418 460
461int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index);
462void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int index);
463
464int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
465void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index);
466
419int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list, 467int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
420 int npages, u64 iova, u32 *lkey, u32 *rkey); 468 int npages, u64 iova, u32 *lkey, u32 *rkey);
421int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages, 469int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
diff --git a/include/linux/module.h b/include/linux/module.h
index 5d2970cdce93..3bfed013350b 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -29,7 +29,7 @@
29#define MODULE_SYMBOL_PREFIX "" 29#define MODULE_SYMBOL_PREFIX ""
30#endif 30#endif
31 31
32#define MODULE_NAME_LEN (64 - sizeof(unsigned long)) 32#define MODULE_NAME_LEN MAX_PARAM_PREFIX_LEN
33 33
34struct kernel_symbol 34struct kernel_symbol
35{ 35{
@@ -60,6 +60,7 @@ struct module_kobject
60 struct kobject kobj; 60 struct kobject kobj;
61 struct module *mod; 61 struct module *mod;
62 struct kobject *drivers_dir; 62 struct kobject *drivers_dir;
63 struct module_param_attrs *mp;
63}; 64};
64 65
65/* These are either module local, or the kernel's dummy ones. */ 66/* These are either module local, or the kernel's dummy ones. */
@@ -242,7 +243,6 @@ struct module
242 243
243 /* Sysfs stuff. */ 244 /* Sysfs stuff. */
244 struct module_kobject mkobj; 245 struct module_kobject mkobj;
245 struct module_param_attrs *param_attrs;
246 struct module_attribute *modinfo_attrs; 246 struct module_attribute *modinfo_attrs;
247 const char *version; 247 const char *version;
248 const char *srcversion; 248 const char *srcversion;
@@ -277,7 +277,7 @@ struct module
277 277
278 /* Exception table */ 278 /* Exception table */
279 unsigned int num_exentries; 279 unsigned int num_exentries;
280 const struct exception_table_entry *extable; 280 struct exception_table_entry *extable;
281 281
282 /* Startup function. */ 282 /* Startup function. */
283 int (*init)(void); 283 int (*init)(void);
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
index ec624381c844..e4af3399ef48 100644
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -13,6 +13,9 @@
13#define MODULE_PARAM_PREFIX KBUILD_MODNAME "." 13#define MODULE_PARAM_PREFIX KBUILD_MODNAME "."
14#endif 14#endif
15 15
16/* Chosen so that structs with an unsigned long line up. */
17#define MAX_PARAM_PREFIX_LEN (64 - sizeof(unsigned long))
18
16#ifdef MODULE 19#ifdef MODULE
17#define ___module_cat(a,b) __mod_ ## a ## b 20#define ___module_cat(a,b) __mod_ ## a ## b
18#define __module_cat(a,b) ___module_cat(a,b) 21#define __module_cat(a,b) ___module_cat(a,b)
@@ -79,7 +82,8 @@ struct kparam_array
79#define __module_param_call(prefix, name, set, get, arg, perm) \ 82#define __module_param_call(prefix, name, set, get, arg, perm) \
80 /* Default value instead of permissions? */ \ 83 /* Default value instead of permissions? */ \
81 static int __param_perm_check_##name __attribute__((unused)) = \ 84 static int __param_perm_check_##name __attribute__((unused)) = \
82 BUILD_BUG_ON_ZERO((perm) < 0 || (perm) > 0777 || ((perm) & 2)); \ 85 BUILD_BUG_ON_ZERO((perm) < 0 || (perm) > 0777 || ((perm) & 2)) \
86 + BUILD_BUG_ON_ZERO(sizeof(""prefix) > MAX_PARAM_PREFIX_LEN); \
83 static const char __param_str_##name[] = prefix #name; \ 87 static const char __param_str_##name[] = prefix #name; \
84 static struct kernel_param __moduleparam_const __param_##name \ 88 static struct kernel_param __moduleparam_const __param_##name \
85 __used \ 89 __used \
@@ -100,6 +104,25 @@ struct kparam_array
100#define module_param(name, type, perm) \ 104#define module_param(name, type, perm) \
101 module_param_named(name, name, type, perm) 105 module_param_named(name, name, type, perm)
102 106
107#ifndef MODULE
108/**
109 * core_param - define a historical core kernel parameter.
110 * @name: the name of the cmdline and sysfs parameter (often the same as var)
111 * @var: the variable
112 * @type: the type (for param_set_##type and param_get_##type)
113 * @perm: visibility in sysfs
114 *
115 * core_param is just like module_param(), but cannot be modular and
116 * doesn't add a prefix (such as "printk."). This is for compatibility
117 * with __setup(), and it makes sense as truly core parameters aren't
118 * tied to the particular file they're in.
119 */
120#define core_param(name, var, type, perm) \
121 param_check_##type(name, &(var)); \
122 __module_param_call("", name, param_set_##type, param_get_##type, \
123 &var, perm)
124#endif /* !MODULE */
125
103/* Actually copy string: maxlen param is usually sizeof(string). */ 126/* Actually copy string: maxlen param is usually sizeof(string). */
104#define module_param_string(name, string, len, perm) \ 127#define module_param_string(name, string, len, perm) \
105 static const struct kparam_string __param_string_##name \ 128 static const struct kparam_string __param_string_##name \
diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
index bcb8f725427c..5231861f357d 100644
--- a/include/linux/oprofile.h
+++ b/include/linux/oprofile.h
@@ -86,13 +86,6 @@ int oprofile_arch_init(struct oprofile_operations * ops);
86void oprofile_arch_exit(void); 86void oprofile_arch_exit(void);
87 87
88/** 88/**
89 * Add data to the event buffer.
90 * The data passed is free-form, but typically consists of
91 * file offsets, dcookies, context information, and ESCAPE codes.
92 */
93void add_event_entry(unsigned long data);
94
95/**
96 * Add a sample. This may be called from any context. Pass 89 * Add a sample. This may be called from any context. Pass
97 * smp_processor_id() as cpu. 90 * smp_processor_id() as cpu.
98 */ 91 */
@@ -162,5 +155,14 @@ int oprofilefs_ulong_from_user(unsigned long * val, char const __user * buf, siz
162 155
163/** lock for read/write safety */ 156/** lock for read/write safety */
164extern spinlock_t oprofilefs_lock; 157extern spinlock_t oprofilefs_lock;
158
159/**
160 * Add the contents of a circular buffer to the event buffer.
161 */
162void oprofile_put_buff(unsigned long *buf, unsigned int start,
163 unsigned int stop, unsigned int max);
164
165unsigned long oprofile_get_cpu_buffer_size(void);
166void oprofile_cpu_buffer_inc_smpl_lost(void);
165 167
166#endif /* OPROFILE_H */ 168#endif /* OPROFILE_H */
diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h
index 0fd39f2231ec..f546ad6fc028 100644
--- a/include/linux/page_cgroup.h
+++ b/include/linux/page_cgroup.h
@@ -99,5 +99,10 @@ static inline struct page_cgroup *lookup_page_cgroup(struct page *page)
99{ 99{
100 return NULL; 100 return NULL;
101} 101}
102
103static inline void page_cgroup_init(void)
104{
105}
106
102#endif 107#endif
103#endif 108#endif
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 085187be29c7..752def8a2ef4 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -51,6 +51,7 @@
51#include <linux/kobject.h> 51#include <linux/kobject.h>
52#include <asm/atomic.h> 52#include <asm/atomic.h>
53#include <linux/device.h> 53#include <linux/device.h>
54#include <linux/io.h>
54 55
55/* Include the ID list */ 56/* Include the ID list */
56#include <linux/pci_ids.h> 57#include <linux/pci_ids.h>
@@ -64,6 +65,11 @@ struct pci_slot {
64 struct kobject kobj; 65 struct kobject kobj;
65}; 66};
66 67
68static inline const char *pci_slot_name(const struct pci_slot *slot)
69{
70 return kobject_name(&slot->kobj);
71}
72
67/* File state for mmap()s on /proc/bus/pci/X/Y */ 73/* File state for mmap()s on /proc/bus/pci/X/Y */
68enum pci_mmap_state { 74enum pci_mmap_state {
69 pci_mmap_io, 75 pci_mmap_io,
@@ -509,9 +515,10 @@ struct pci_bus *pci_create_bus(struct device *parent, int bus,
509struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, 515struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
510 int busnr); 516 int busnr);
511struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr, 517struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
512 const char *name); 518 const char *name,
519 struct hotplug_slot *hotplug);
513void pci_destroy_slot(struct pci_slot *slot); 520void pci_destroy_slot(struct pci_slot *slot);
514void pci_update_slot_number(struct pci_slot *slot, int slot_nr); 521void pci_renumber_slot(struct pci_slot *slot, int slot_nr);
515int pci_scan_slot(struct pci_bus *bus, int devfn); 522int pci_scan_slot(struct pci_bus *bus, int devfn);
516struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn); 523struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn);
517void pci_device_add(struct pci_dev *dev, struct pci_bus *bus); 524void pci_device_add(struct pci_dev *dev, struct pci_bus *bus);
@@ -626,6 +633,8 @@ int pcix_get_mmrbc(struct pci_dev *dev);
626int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc); 633int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc);
627int pcie_get_readrq(struct pci_dev *dev); 634int pcie_get_readrq(struct pci_dev *dev);
628int pcie_set_readrq(struct pci_dev *dev, int rq); 635int pcie_set_readrq(struct pci_dev *dev, int rq);
636int pci_reset_function(struct pci_dev *dev);
637int pci_execute_reset_function(struct pci_dev *dev);
629void pci_update_resource(struct pci_dev *dev, struct resource *res, int resno); 638void pci_update_resource(struct pci_dev *dev, struct resource *res, int resno);
630int __must_check pci_assign_resource(struct pci_dev *dev, int i); 639int __must_check pci_assign_resource(struct pci_dev *dev, int i);
631int pci_select_bars(struct pci_dev *dev, unsigned long flags); 640int pci_select_bars(struct pci_dev *dev, unsigned long flags);
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
index a08cd06b541a..a00bd1a0f156 100644
--- a/include/linux/pci_hotplug.h
+++ b/include/linux/pci_hotplug.h
@@ -142,8 +142,6 @@ struct hotplug_slot_info {
142 142
143/** 143/**
144 * struct hotplug_slot - used to register a physical slot with the hotplug pci core 144 * struct hotplug_slot - used to register a physical slot with the hotplug pci core
145 * @name: the name of the slot being registered. This string must
146 * be unique amoung slots registered on this system.
147 * @ops: pointer to the &struct hotplug_slot_ops to be used for this slot 145 * @ops: pointer to the &struct hotplug_slot_ops to be used for this slot
148 * @info: pointer to the &struct hotplug_slot_info for the initial values for 146 * @info: pointer to the &struct hotplug_slot_info for the initial values for
149 * this slot. 147 * this slot.
@@ -153,7 +151,6 @@ struct hotplug_slot_info {
153 * needs. 151 * needs.
154 */ 152 */
155struct hotplug_slot { 153struct hotplug_slot {
156 char *name;
157 struct hotplug_slot_ops *ops; 154 struct hotplug_slot_ops *ops;
158 struct hotplug_slot_info *info; 155 struct hotplug_slot_info *info;
159 void (*release) (struct hotplug_slot *slot); 156 void (*release) (struct hotplug_slot *slot);
@@ -165,7 +162,13 @@ struct hotplug_slot {
165}; 162};
166#define to_hotplug_slot(n) container_of(n, struct hotplug_slot, kobj) 163#define to_hotplug_slot(n) container_of(n, struct hotplug_slot, kobj)
167 164
168extern int pci_hp_register(struct hotplug_slot *, struct pci_bus *, int nr); 165static inline const char *hotplug_slot_name(const struct hotplug_slot *slot)
166{
167 return pci_slot_name(slot->pci_slot);
168}
169
170extern int pci_hp_register(struct hotplug_slot *, struct pci_bus *, int nr,
171 const char *name);
169extern int pci_hp_deregister(struct hotplug_slot *slot); 172extern int pci_hp_deregister(struct hotplug_slot *slot);
170extern int __must_check pci_hp_change_slot_info (struct hotplug_slot *slot, 173extern int __must_check pci_hp_change_slot_info (struct hotplug_slot *slot,
171 struct hotplug_slot_info *info); 174 struct hotplug_slot_info *info);
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index e5d344bfcb7e..369f44286353 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -1944,6 +1944,14 @@
1944 1944
1945#define PCI_VENDOR_ID_OXSEMI 0x1415 1945#define PCI_VENDOR_ID_OXSEMI 0x1415
1946#define PCI_DEVICE_ID_OXSEMI_12PCI840 0x8403 1946#define PCI_DEVICE_ID_OXSEMI_12PCI840 0x8403
1947#define PCI_DEVICE_ID_OXSEMI_PCIe840 0xC000
1948#define PCI_DEVICE_ID_OXSEMI_PCIe840_G 0xC004
1949#define PCI_DEVICE_ID_OXSEMI_PCIe952_0 0xC100
1950#define PCI_DEVICE_ID_OXSEMI_PCIe952_0_G 0xC104
1951#define PCI_DEVICE_ID_OXSEMI_PCIe952_1 0xC110
1952#define PCI_DEVICE_ID_OXSEMI_PCIe952_1_G 0xC114
1953#define PCI_DEVICE_ID_OXSEMI_PCIe952_1_U 0xC118
1954#define PCI_DEVICE_ID_OXSEMI_PCIe952_1_GU 0xC11C
1947#define PCI_DEVICE_ID_OXSEMI_16PCI954 0x9501 1955#define PCI_DEVICE_ID_OXSEMI_16PCI954 0x9501
1948#define PCI_DEVICE_ID_OXSEMI_16PCI95N 0x9511 1956#define PCI_DEVICE_ID_OXSEMI_16PCI95N 0x9511
1949#define PCI_DEVICE_ID_OXSEMI_16PCI954PP 0x9513 1957#define PCI_DEVICE_ID_OXSEMI_16PCI954PP 0x9513
diff --git a/include/linux/pci_regs.h b/include/linux/pci_regs.h
index eb6686b88f9a..e5effd47ed74 100644
--- a/include/linux/pci_regs.h
+++ b/include/linux/pci_regs.h
@@ -377,6 +377,7 @@
377#define PCI_EXP_DEVCAP_RBER 0x8000 /* Role-Based Error Reporting */ 377#define PCI_EXP_DEVCAP_RBER 0x8000 /* Role-Based Error Reporting */
378#define PCI_EXP_DEVCAP_PWR_VAL 0x3fc0000 /* Slot Power Limit Value */ 378#define PCI_EXP_DEVCAP_PWR_VAL 0x3fc0000 /* Slot Power Limit Value */
379#define PCI_EXP_DEVCAP_PWR_SCL 0xc000000 /* Slot Power Limit Scale */ 379#define PCI_EXP_DEVCAP_PWR_SCL 0xc000000 /* Slot Power Limit Scale */
380#define PCI_EXP_DEVCAP_FLR 0x10000000 /* Function Level Reset */
380#define PCI_EXP_DEVCTL 8 /* Device Control */ 381#define PCI_EXP_DEVCTL 8 /* Device Control */
381#define PCI_EXP_DEVCTL_CERE 0x0001 /* Correctable Error Reporting En. */ 382#define PCI_EXP_DEVCTL_CERE 0x0001 /* Correctable Error Reporting En. */
382#define PCI_EXP_DEVCTL_NFERE 0x0002 /* Non-Fatal Error Reporting Enable */ 383#define PCI_EXP_DEVCTL_NFERE 0x0002 /* Non-Fatal Error Reporting Enable */
@@ -389,6 +390,7 @@
389#define PCI_EXP_DEVCTL_AUX_PME 0x0400 /* Auxiliary Power PM Enable */ 390#define PCI_EXP_DEVCTL_AUX_PME 0x0400 /* Auxiliary Power PM Enable */
390#define PCI_EXP_DEVCTL_NOSNOOP_EN 0x0800 /* Enable No Snoop */ 391#define PCI_EXP_DEVCTL_NOSNOOP_EN 0x0800 /* Enable No Snoop */
391#define PCI_EXP_DEVCTL_READRQ 0x7000 /* Max_Read_Request_Size */ 392#define PCI_EXP_DEVCTL_READRQ 0x7000 /* Max_Read_Request_Size */
393#define PCI_EXP_DEVCTL_BCR_FLR 0x8000 /* Bridge Configuration Retry / FLR */
392#define PCI_EXP_DEVSTA 10 /* Device Status */ 394#define PCI_EXP_DEVSTA 10 /* Device Status */
393#define PCI_EXP_DEVSTA_CED 0x01 /* Correctable Error Detected */ 395#define PCI_EXP_DEVSTA_CED 0x01 /* Correctable Error Detected */
394#define PCI_EXP_DEVSTA_NFED 0x02 /* Non-Fatal Error Detected */ 396#define PCI_EXP_DEVSTA_NFED 0x02 /* Non-Fatal Error Detected */
diff --git a/include/linux/profile.h b/include/linux/profile.h
index 570045053ce9..a0fc32279fc0 100644
--- a/include/linux/profile.h
+++ b/include/linux/profile.h
@@ -19,10 +19,16 @@ struct notifier_block;
19 19
20#if defined(CONFIG_PROFILING) && defined(CONFIG_PROC_FS) 20#if defined(CONFIG_PROFILING) && defined(CONFIG_PROC_FS)
21void create_prof_cpu_mask(struct proc_dir_entry *de); 21void create_prof_cpu_mask(struct proc_dir_entry *de);
22int create_proc_profile(void);
22#else 23#else
23static inline void create_prof_cpu_mask(struct proc_dir_entry *de) 24static inline void create_prof_cpu_mask(struct proc_dir_entry *de)
24{ 25{
25} 26}
27
28static inline int create_proc_profile(void)
29{
30 return 0;
31}
26#endif 32#endif
27 33
28enum profile_type { 34enum profile_type {
@@ -37,7 +43,6 @@ extern int prof_on __read_mostly;
37/* init basic kernel profiler */ 43/* init basic kernel profiler */
38int profile_init(void); 44int profile_init(void);
39int profile_setup(char *str); 45int profile_setup(char *str);
40int create_proc_profile(void);
41void profile_tick(int type); 46void profile_tick(int type);
42 47
43/* 48/*
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 5c38db536e07..10bff55b0824 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -287,7 +287,6 @@ extern void trap_init(void);
287extern void account_process_tick(struct task_struct *task, int user); 287extern void account_process_tick(struct task_struct *task, int user);
288extern void update_process_times(int user); 288extern void update_process_times(int user);
289extern void scheduler_tick(void); 289extern void scheduler_tick(void);
290extern void hrtick_resched(void);
291 290
292extern void sched_show_task(struct task_struct *p); 291extern void sched_show_task(struct task_struct *p);
293 292
@@ -1665,6 +1664,7 @@ extern unsigned int sysctl_sched_features;
1665extern unsigned int sysctl_sched_migration_cost; 1664extern unsigned int sysctl_sched_migration_cost;
1666extern unsigned int sysctl_sched_nr_migrate; 1665extern unsigned int sysctl_sched_nr_migrate;
1667extern unsigned int sysctl_sched_shares_ratelimit; 1666extern unsigned int sysctl_sched_shares_ratelimit;
1667extern unsigned int sysctl_sched_shares_thresh;
1668 1668
1669int sched_nr_latency_handler(struct ctl_table *table, int write, 1669int sched_nr_latency_handler(struct ctl_table *table, int write,
1670 struct file *file, void __user *buffer, size_t *length, 1670 struct file *file, void __user *buffer, size_t *length,
diff --git a/include/linux/usb/wusb-wa.h b/include/linux/usb/wusb-wa.h
new file mode 100644
index 000000000000..a102561e7026
--- /dev/null
+++ b/include/linux/usb/wusb-wa.h
@@ -0,0 +1,271 @@
1/*
2 * Wireless USB Wire Adapter constants and structures.
3 *
4 * Copyright (C) 2005-2006 Intel Corporation.
5 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 * 02110-1301, USA.
20 *
21 *
22 * FIXME: docs
23 * FIXME: organize properly, group logically
24 *
25 * All the event structures are defined in uwb/spec.h, as they are
26 * common to the WHCI and WUSB radio control interfaces.
27 *
28 * References:
29 * [WUSB] Wireless Universal Serial Bus Specification, revision 1.0, ch8
30 */
31#ifndef __LINUX_USB_WUSB_WA_H
32#define __LINUX_USB_WUSB_WA_H
33
34/**
35 * Radio Command Request for the Radio Control Interface
36 *
37 * Radio Control Interface command and event codes are the same as
38 * WHCI, and listed in include/linux/uwb.h:UWB_RC_{CMD,EVT}_*
39 */
40enum {
41 WA_EXEC_RC_CMD = 40, /* Radio Control command Request */
42};
43
44/* Wireless Adapter Requests ([WUSB] table 8-51) */
45enum {
46 WUSB_REQ_ADD_MMC_IE = 20,
47 WUSB_REQ_REMOVE_MMC_IE = 21,
48 WUSB_REQ_SET_NUM_DNTS = 22,
49 WUSB_REQ_SET_CLUSTER_ID = 23,
50 WUSB_REQ_SET_DEV_INFO = 24,
51 WUSB_REQ_GET_TIME = 25,
52 WUSB_REQ_SET_STREAM_IDX = 26,
53 WUSB_REQ_SET_WUSB_MAS = 27,
54};
55
56
57/* Wireless Adapter WUSB Channel Time types ([WUSB] table 8-52) */
58enum {
59 WUSB_TIME_ADJ = 0,
60 WUSB_TIME_BPST = 1,
61 WUSB_TIME_WUSB = 2,
62};
63
64enum {
65 WA_ENABLE = 0x01,
66 WA_RESET = 0x02,
67 RPIPE_PAUSE = 0x1,
68};
69
70/* Responses from Get Status request ([WUSB] section 8.3.1.6) */
71enum {
72 WA_STATUS_ENABLED = 0x01,
73 WA_STATUS_RESETTING = 0x02
74};
75
76enum rpipe_crs {
77 RPIPE_CRS_CTL = 0x01,
78 RPIPE_CRS_ISO = 0x02,
79 RPIPE_CRS_BULK = 0x04,
80 RPIPE_CRS_INTR = 0x08
81};
82
83/**
84 * RPipe descriptor ([WUSB] section 8.5.2.11)
85 *
86 * FIXME: explain rpipes
87 */
88struct usb_rpipe_descriptor {
89 u8 bLength;
90 u8 bDescriptorType;
91 __le16 wRPipeIndex;
92 __le16 wRequests;
93 __le16 wBlocks; /* rw if 0 */
94 __le16 wMaxPacketSize; /* rw? */
95 u8 bHSHubAddress; /* reserved: 0 */
96 u8 bHSHubPort; /* ??? FIXME ??? */
97 u8 bSpeed; /* rw: xfer rate 'enum uwb_phy_rate' */
98 u8 bDeviceAddress; /* rw: Target device address */
99 u8 bEndpointAddress; /* rw: Target EP address */
100 u8 bDataSequence; /* ro: Current Data sequence */
101 __le32 dwCurrentWindow; /* ro */
102 u8 bMaxDataSequence; /* ro?: max supported seq */
103 u8 bInterval; /* rw: */
104 u8 bOverTheAirInterval; /* rw: */
105 u8 bmAttribute; /* ro? */
106 u8 bmCharacteristics; /* ro? enum rpipe_attr, supported xsactions */
107 u8 bmRetryOptions; /* rw? */
108 __le16 wNumTransactionErrors; /* rw */
109} __attribute__ ((packed));
110
111/**
112 * Wire Adapter Notification types ([WUSB] sections 8.4.5 & 8.5.4)
113 *
114 * These are the notifications coming on the notification endpoint of
115 * an HWA and a DWA.
116 */
117enum wa_notif_type {
118 DWA_NOTIF_RWAKE = 0x91,
119 DWA_NOTIF_PORTSTATUS = 0x92,
120 WA_NOTIF_TRANSFER = 0x93,
121 HWA_NOTIF_BPST_ADJ = 0x94,
122 HWA_NOTIF_DN = 0x95,
123};
124
125/**
126 * Wire Adapter notification header
127 *
128 * Notifications coming from a wire adapter use a common header
129 * defined in [WUSB] sections 8.4.5 & 8.5.4.
130 */
131struct wa_notif_hdr {
132 u8 bLength;
133 u8 bNotifyType; /* enum wa_notif_type */
134} __attribute__((packed));
135
136/**
137 * HWA DN Received notification [(WUSB] section 8.5.4.2)
138 *
139 * The DNData is specified in WUSB1.0[7.6]. For each device
140 * notification we received, we just need to dispatch it.
141 *
142 * @dndata: this is really an array of notifications, but all start
143 * with the same header.
144 */
145struct hwa_notif_dn {
146 struct wa_notif_hdr hdr;
147 u8 bSourceDeviceAddr; /* from errata 2005/07 */
148 u8 bmAttributes;
149 struct wusb_dn_hdr dndata[];
150} __attribute__((packed));
151
152/* [WUSB] section 8.3.3 */
153enum wa_xfer_type {
154 WA_XFER_TYPE_CTL = 0x80,
155 WA_XFER_TYPE_BI = 0x81, /* bulk/interrupt */
156 WA_XFER_TYPE_ISO = 0x82,
157 WA_XFER_RESULT = 0x83,
158 WA_XFER_ABORT = 0x84,
159};
160
161/* [WUSB] section 8.3.3 */
162struct wa_xfer_hdr {
163 u8 bLength; /* 0x18 */
164 u8 bRequestType; /* 0x80 WA_REQUEST_TYPE_CTL */
165 __le16 wRPipe; /* RPipe index */
166 __le32 dwTransferID; /* Host-assigned ID */
167 __le32 dwTransferLength; /* Length of data to xfer */
168 u8 bTransferSegment;
169} __attribute__((packed));
170
171struct wa_xfer_ctl {
172 struct wa_xfer_hdr hdr;
173 u8 bmAttribute;
174 __le16 wReserved;
175 struct usb_ctrlrequest baSetupData;
176} __attribute__((packed));
177
178struct wa_xfer_bi {
179 struct wa_xfer_hdr hdr;
180 u8 bReserved;
181 __le16 wReserved;
182} __attribute__((packed));
183
184struct wa_xfer_hwaiso {
185 struct wa_xfer_hdr hdr;
186 u8 bReserved;
187 __le16 wPresentationTime;
188 __le32 dwNumOfPackets;
189 /* FIXME: u8 pktdata[]? */
190} __attribute__((packed));
191
192/* [WUSB] section 8.3.3.5 */
193struct wa_xfer_abort {
194 u8 bLength;
195 u8 bRequestType;
196 __le16 wRPipe; /* RPipe index */
197 __le32 dwTransferID; /* Host-assigned ID */
198} __attribute__((packed));
199
200/**
201 * WA Transfer Complete notification ([WUSB] section 8.3.3.3)
202 *
203 */
204struct wa_notif_xfer {
205 struct wa_notif_hdr hdr;
206 u8 bEndpoint;
207 u8 Reserved;
208} __attribute__((packed));
209
210/** Transfer result basic codes [WUSB] table 8-15 */
211enum {
212 WA_XFER_STATUS_SUCCESS,
213 WA_XFER_STATUS_HALTED,
214 WA_XFER_STATUS_DATA_BUFFER_ERROR,
215 WA_XFER_STATUS_BABBLE,
216 WA_XFER_RESERVED,
217 WA_XFER_STATUS_NOT_FOUND,
218 WA_XFER_STATUS_INSUFFICIENT_RESOURCE,
219 WA_XFER_STATUS_TRANSACTION_ERROR,
220 WA_XFER_STATUS_ABORTED,
221 WA_XFER_STATUS_RPIPE_NOT_READY,
222 WA_XFER_INVALID_FORMAT,
223 WA_XFER_UNEXPECTED_SEGMENT_NUMBER,
224 WA_XFER_STATUS_RPIPE_TYPE_MISMATCH,
225};
226
227/** [WUSB] section 8.3.3.4 */
228struct wa_xfer_result {
229 struct wa_notif_hdr hdr;
230 __le32 dwTransferID;
231 __le32 dwTransferLength;
232 u8 bTransferSegment;
233 u8 bTransferStatus;
234 __le32 dwNumOfPackets;
235} __attribute__((packed));
236
237/**
238 * Wire Adapter Class Descriptor ([WUSB] section 8.5.2.7).
239 *
240 * NOTE: u16 fields are read Little Endian from the hardware.
241 *
242 * @bNumPorts is the original max number of devices that the host can
243 * connect; we might chop this so the stack can handle
244 * it. In case you need to access it, use wusbhc->ports_max
245 * if it is a Wireless USB WA.
246 */
247struct usb_wa_descriptor {
248 u8 bLength;
249 u8 bDescriptorType;
250 u16 bcdWAVersion;
251 u8 bNumPorts; /* don't use!! */
252 u8 bmAttributes; /* Reserved == 0 */
253 u16 wNumRPipes;
254 u16 wRPipeMaxBlock;
255 u8 bRPipeBlockSize;
256 u8 bPwrOn2PwrGood;
257 u8 bNumMMCIEs;
258 u8 DeviceRemovable; /* FIXME: in DWA this is up to 16 bytes */
259} __attribute__((packed));
260
261/**
262 * HWA Device Information Buffer (WUSB1.0[T8.54])
263 */
264struct hwa_dev_info {
265 u8 bmDeviceAvailability[32]; /* FIXME: ignored for now */
266 u8 bDeviceAddress;
267 __le16 wPHYRates;
268 u8 bmDeviceAttribute;
269} __attribute__((packed));
270
271#endif /* #ifndef __LINUX_USB_WUSB_WA_H */
diff --git a/include/linux/usb/wusb.h b/include/linux/usb/wusb.h
new file mode 100644
index 000000000000..5f401b644ed5
--- /dev/null
+++ b/include/linux/usb/wusb.h
@@ -0,0 +1,376 @@
1/*
2 * Wireless USB Standard Definitions
3 * Event Size Tables
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * FIXME: docs
24 * FIXME: organize properly, group logically
25 *
26 * All the event structures are defined in uwb/spec.h, as they are
27 * common to the WHCI and WUSB radio control interfaces.
28 */
29
30#ifndef __WUSB_H__
31#define __WUSB_H__
32
33#include <linux/types.h>
34#include <linux/kernel.h>
35#include <linux/uwb/spec.h>
36#include <linux/usb/ch9.h>
37#include <linux/param.h>
38
39/**
40 * WUSB Information Element header
41 *
42 * I don't know why, they decided to make it different to the MBOA MAC
43 * IE Header; beats me.
44 */
45struct wuie_hdr {
46 u8 bLength;
47 u8 bIEIdentifier;
48} __attribute__((packed));
49
50enum {
51 WUIE_ID_WCTA = 0x80,
52 WUIE_ID_CONNECTACK,
53 WUIE_ID_HOST_INFO,
54 WUIE_ID_CHANGE_ANNOUNCE,
55 WUIE_ID_DEVICE_DISCONNECT,
56 WUIE_ID_HOST_DISCONNECT,
57 WUIE_ID_KEEP_ALIVE = 0x89,
58 WUIE_ID_ISOCH_DISCARD,
59 WUIE_ID_RESET_DEVICE,
60};
61
62/**
63 * Maximum number of array elements in a WUSB IE.
64 *
 65 * WUSB1.0[7.5 before table 7-38] says that WUSB IEs that
 66 * are "arrays" have to be limited to 4 elements. So we define it
 67 * like that to ease up and submit only the needed size.
68 */
69#define WUIE_ELT_MAX 4
70
71/**
72 * Wrapper for the data that defines a CHID, a CDID or a CK
73 *
74 * WUSB defines that CHIDs, CDIDs and CKs are a 16 byte string of
75 * data. In order to avoid confusion and enforce types, we wrap it.
76 *
 77 * Make it packed, as we use it in some hw definitions.
78 */
79struct wusb_ckhdid {
80 u8 data[16];
81} __attribute__((packed));
82
83const static
84struct wusb_ckhdid wusb_ckhdid_zero = { .data = { 0 } };
85
86#define WUSB_CKHDID_STRSIZE (3 * sizeof(struct wusb_ckhdid) + 1)
87
88/**
89 * WUSB IE: Host Information (WUSB1.0[7.5.2])
90 *
91 * Used to provide information about the host to the Wireless USB
92 * devices in range (CHID can be used as an ASCII string).
93 */
94struct wuie_host_info {
95 struct wuie_hdr hdr;
96 __le16 attributes;
97 struct wusb_ckhdid CHID;
98} __attribute__((packed));
99
100/**
101 * WUSB IE: Connect Ack (WUSB1.0[7.5.1])
102 *
103 * Used to acknowledge device connect requests. See note for
104 * WUIE_ELT_MAX.
105 */
106struct wuie_connect_ack {
107 struct wuie_hdr hdr;
108 struct {
109 struct wusb_ckhdid CDID;
110 u8 bDeviceAddress; /* 0 means unused */
111 u8 bReserved;
112 } blk[WUIE_ELT_MAX];
113} __attribute__((packed));
114
115/**
116 * WUSB IE Host Information Element, Connect Availability
117 *
118 * WUSB1.0[7.5.2], bmAttributes description
119 */
120enum {
121 WUIE_HI_CAP_RECONNECT = 0,
122 WUIE_HI_CAP_LIMITED,
123 WUIE_HI_CAP_RESERVED,
124 WUIE_HI_CAP_ALL,
125};
126
127/**
128 * WUSB IE: Channel Stop (WUSB1.0[7.5.8])
129 *
130 * Tells devices the host is going to stop sending MMCs and will disappear.
131 */
132struct wuie_channel_stop {
133 struct wuie_hdr hdr;
134 u8 attributes;
135 u8 timestamp[3];
136} __attribute__((packed));
137
138/**
139 * WUSB IE: Keepalive (WUSB1.0[7.5.9])
140 *
141 * Ask device(s) to send keepalives.
142 */
143struct wuie_keep_alive {
144 struct wuie_hdr hdr;
145 u8 bDeviceAddress[WUIE_ELT_MAX];
146} __attribute__((packed));
147
148/**
149 * WUSB IE: Reset device (WUSB1.0[7.5.11])
150 *
151 * Tell device to reset; in all truth, we can fit 4 CDIDs, but we only
152 * use it for one at the time...
153 *
154 * In any case, this request is a wee bit silly: why don't they target
155 * by address??
156 */
157struct wuie_reset {
158 struct wuie_hdr hdr;
159 struct wusb_ckhdid CDID;
160} __attribute__((packed));
161
162/**
163 * WUSB IE: Disconnect device (WUSB1.0[7.5.11])
164 *
165 * Tell device to disconnect; we can fit 4 addresses, but we only use
166 * it for one at the time...
167 */
168struct wuie_disconnect {
169 struct wuie_hdr hdr;
170 u8 bDeviceAddress;
171 u8 padding;
172} __attribute__((packed));
173
174/**
175 * WUSB IE: Host disconnect ([WUSB] section 7.5.5)
176 *
177 * Tells all connected devices to disconnect.
178 */
179struct wuie_host_disconnect {
180 struct wuie_hdr hdr;
181} __attribute__((packed));
182
183/**
184 * WUSB Device Notification header (WUSB1.0[7.6])
185 */
186struct wusb_dn_hdr {
187 u8 bType;
188 u8 notifdata[];
189} __attribute__((packed));
190
191/** Device Notification codes (WUSB1.0[Table 7-54]) */
192enum WUSB_DN {
193 WUSB_DN_CONNECT = 0x01,
194 WUSB_DN_DISCONNECT = 0x02,
195 WUSB_DN_EPRDY = 0x03,
196 WUSB_DN_MASAVAILCHANGED = 0x04,
197 WUSB_DN_RWAKE = 0x05,
198 WUSB_DN_SLEEP = 0x06,
199 WUSB_DN_ALIVE = 0x07,
200};
201
202/** WUSB Device Notification Connect */
203struct wusb_dn_connect {
204 struct wusb_dn_hdr hdr;
205 __le16 attributes;
206 struct wusb_ckhdid CDID;
207} __attribute__((packed));
208
209static inline int wusb_dn_connect_prev_dev_addr(const struct wusb_dn_connect *dn)
210{
211 return le16_to_cpu(dn->attributes) & 0xff;
212}
213
214static inline int wusb_dn_connect_new_connection(const struct wusb_dn_connect *dn)
215{
216 return (le16_to_cpu(dn->attributes) >> 8) & 0x1;
217}
218
219static inline int wusb_dn_connect_beacon_behavior(const struct wusb_dn_connect *dn)
220{
221 return (le16_to_cpu(dn->attributes) >> 9) & 0x03;
222}
223
224/** Device is alive (aka: pong) (WUSB1.0[7.6.7]) */
225struct wusb_dn_alive {
226 struct wusb_dn_hdr hdr;
227} __attribute__((packed));
228
229/** Device is disconnecting (WUSB1.0[7.6.2]) */
230struct wusb_dn_disconnect {
231 struct wusb_dn_hdr hdr;
232} __attribute__((packed));
233
234/* General constants */
235enum {
236 WUSB_TRUST_TIMEOUT_MS = 4000, /* [WUSB] section 4.15.1 */
237};
238
239static inline size_t ckhdid_printf(char *pr_ckhdid, size_t size,
240 const struct wusb_ckhdid *ckhdid)
241{
242 return scnprintf(pr_ckhdid, size,
243 "%02hx %02hx %02hx %02hx %02hx %02hx %02hx %02hx "
244 "%02hx %02hx %02hx %02hx %02hx %02hx %02hx %02hx",
245 ckhdid->data[0], ckhdid->data[1],
246 ckhdid->data[2], ckhdid->data[3],
247 ckhdid->data[4], ckhdid->data[5],
248 ckhdid->data[6], ckhdid->data[7],
249 ckhdid->data[8], ckhdid->data[9],
250 ckhdid->data[10], ckhdid->data[11],
251 ckhdid->data[12], ckhdid->data[13],
252 ckhdid->data[14], ckhdid->data[15]);
253}
254
255/*
256 * WUSB Crypto stuff (WUSB1.0[6])
257 */
258
259extern const char *wusb_et_name(u8);
260
261/**
262 * WUSB key index WUSB1.0[7.3.2.4], for usage when setting keys for
263 * the host or the device.
264 */
265static inline u8 wusb_key_index(int index, int type, int originator)
266{
267 return (originator << 6) | (type << 4) | index;
268}
269
270#define WUSB_KEY_INDEX_TYPE_PTK 0 /* for HWA only */
271#define WUSB_KEY_INDEX_TYPE_ASSOC 1
272#define WUSB_KEY_INDEX_TYPE_GTK 2
273#define WUSB_KEY_INDEX_ORIGINATOR_HOST 0
274#define WUSB_KEY_INDEX_ORIGINATOR_DEVICE 1
275
276/* A CCM Nonce, defined in WUSB1.0[6.4.1] */
277struct aes_ccm_nonce {
278 u8 sfn[6]; /* Little Endian */
279 u8 tkid[3]; /* LE */
280 struct uwb_dev_addr dest_addr;
281 struct uwb_dev_addr src_addr;
282} __attribute__((packed));
283
284/* A CCM operation label, defined on WUSB1.0[6.5.x] */
285struct aes_ccm_label {
286 u8 data[14];
287} __attribute__((packed));
288
289/*
290 * Input to the key derivation sequence defined in
291 * WUSB1.0[6.5.1]. Rest of the data is in the CCM Nonce passed to the
292 * PRF function.
293 */
294struct wusb_keydvt_in {
295 u8 hnonce[16];
296 u8 dnonce[16];
297} __attribute__((packed));
298
299/*
300 * Output from the key derivation sequence defined in
301 * WUSB1.0[6.5.1].
302 */
303struct wusb_keydvt_out {
304 u8 kck[16];
305 u8 ptk[16];
306} __attribute__((packed));
307
308/* Pseudo Random Function WUSB1.0[6.5] */
309extern int wusb_crypto_init(void);
310extern void wusb_crypto_exit(void);
311extern ssize_t wusb_prf(void *out, size_t out_size,
312 const u8 key[16], const struct aes_ccm_nonce *_n,
313 const struct aes_ccm_label *a,
314 const void *b, size_t blen, size_t len);
315
316static inline int wusb_prf_64(void *out, size_t out_size, const u8 key[16],
317 const struct aes_ccm_nonce *n,
318 const struct aes_ccm_label *a,
319 const void *b, size_t blen)
320{
321 return wusb_prf(out, out_size, key, n, a, b, blen, 64);
322}
323
324static inline int wusb_prf_128(void *out, size_t out_size, const u8 key[16],
325 const struct aes_ccm_nonce *n,
326 const struct aes_ccm_label *a,
327 const void *b, size_t blen)
328{
329 return wusb_prf(out, out_size, key, n, a, b, blen, 128);
330}
331
332static inline int wusb_prf_256(void *out, size_t out_size, const u8 key[16],
333 const struct aes_ccm_nonce *n,
334 const struct aes_ccm_label *a,
335 const void *b, size_t blen)
336{
337 return wusb_prf(out, out_size, key, n, a, b, blen, 256);
338}
339
340/* Key derivation WUSB1.0[6.5.1] */
341static inline int wusb_key_derive(struct wusb_keydvt_out *keydvt_out,
342 const u8 key[16],
343 const struct aes_ccm_nonce *n,
344 const struct wusb_keydvt_in *keydvt_in)
345{
346 const struct aes_ccm_label a = { .data = "Pair-wise keys" };
347 return wusb_prf_256(keydvt_out, sizeof(*keydvt_out), key, n, &a,
348 keydvt_in, sizeof(*keydvt_in));
349}
350
351/*
352 * Out-of-band MIC Generation WUSB1.0[6.5.2]
353 *
354 * Compute the MIC over @key, @n and @hs and place it in @mic_out.
355 *
356 * @mic_out: Where to place the 8 byte MIC tag
357 * @key: KCK from the derivation process
358 * @n: CCM nonce, n->sfn == 0, TKID as established in the
359 * process.
360 * @hs: Handshake struct for phase 2 of the 4-way.
361 * hs->bStatus and hs->bReserved are zero.
362 * hs->bMessageNumber is 2 (WUSB1.0[7.3.2.5.2]
363 * hs->dest_addr is the device's USB address padded with 0
364 * hs->src_addr is the host's UWB device address
365 * hs->mic is ignored (as we compute that value).
366 */
367static inline int wusb_oob_mic(u8 mic_out[8], const u8 key[16],
368 const struct aes_ccm_nonce *n,
369 const struct usb_handshake *hs)
370{
371 const struct aes_ccm_label a = { .data = "out-of-bandMIC" };
372 return wusb_prf_64(mic_out, 8, key, n, &a,
373 hs, sizeof(*hs) - sizeof(hs->MIC));
374}
375
376#endif /* #ifndef __WUSB_H__ */
diff --git a/include/linux/uwb.h b/include/linux/uwb.h
new file mode 100644
index 000000000000..f9ccbd9a2ced
--- /dev/null
+++ b/include/linux/uwb.h
@@ -0,0 +1,765 @@
1/*
2 * Ultra Wide Band
3 * UWB API
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * FIXME: doc: overview of the API, different parts and pointers
24 */
25
26#ifndef __LINUX__UWB_H__
27#define __LINUX__UWB_H__
28
29#include <linux/limits.h>
30#include <linux/device.h>
31#include <linux/mutex.h>
32#include <linux/timer.h>
33#include <linux/workqueue.h>
34#include <linux/uwb/spec.h>
35
36struct uwb_dev;
37struct uwb_beca_e;
38struct uwb_rc;
39struct uwb_rsv;
40struct uwb_dbg;
41
42/**
43 * struct uwb_dev - a UWB Device
44 * @rc: UWB Radio Controller that discovered the device (kind of its
45 * parent).
46 * @bce: a beacon cache entry for this device; or NULL if the device
47 * is a local radio controller.
48 * @mac_addr: the EUI-48 address of this device.
49 * @dev_addr: the current DevAddr used by this device.
50 * @beacon_slot: the slot number the beacon is using.
51 * @streams: bitmap of streams allocated to reservations targeted at
52 * this device. For an RC, this is the streams allocated for
53 * reservations targeted at DevAddrs.
54 *
55 * A UWB device may either by a neighbor or part of a local radio
56 * controller.
57 */
58struct uwb_dev {
59 struct mutex mutex;
60 struct list_head list_node;
61 struct device dev;
62 struct uwb_rc *rc; /* radio controller */
63 struct uwb_beca_e *bce; /* Beacon Cache Entry */
64
65 struct uwb_mac_addr mac_addr;
66 struct uwb_dev_addr dev_addr;
67 int beacon_slot;
68 DECLARE_BITMAP(streams, UWB_NUM_STREAMS);
69};
70#define to_uwb_dev(d) container_of(d, struct uwb_dev, dev)
71
72/**
73 * UWB HWA/WHCI Radio Control {Command|Event} Block context IDs
74 *
75 * RC[CE]Bs have a 'context ID' field that matches the command with
76 * the event received to confirm it.
77 *
78 * Maximum number of context IDs
79 */
80enum { UWB_RC_CTX_MAX = 256 };
81
82
83/** Notification chain head for UWB generated events to listeners */
84struct uwb_notifs_chain {
85 struct list_head list;
86 struct mutex mutex;
87};
88
89/**
90 * struct uwb_mas_bm - a bitmap of all MAS in a superframe
91 * @bm: a bitmap of length #UWB_NUM_MAS
92 */
93struct uwb_mas_bm {
94 DECLARE_BITMAP(bm, UWB_NUM_MAS);
95};
96
97/**
98 * uwb_rsv_state - UWB Reservation state.
99 *
100 * NONE - reservation is not active (no DRP IE being transmitted).
101 *
102 * Owner reservation states:
103 *
104 * INITIATED - owner has sent an initial DRP request.
105 * PENDING - target responded with pending Reason Code.
106 * MODIFIED - reservation manager is modifying an established
107 * reservation with a different MAS allocation.
108 * ESTABLISHED - the reservation has been successfully negotiated.
109 *
110 * Target reservation states:
111 *
112 * DENIED - request is denied.
113 * ACCEPTED - request is accepted.
114 * PENDING - PAL has yet to make a decision to whether to accept or
115 * deny.
116 *
117 * FIXME: further target states TBD.
118 */
119enum uwb_rsv_state {
120 UWB_RSV_STATE_NONE,
121 UWB_RSV_STATE_O_INITIATED,
122 UWB_RSV_STATE_O_PENDING,
123 UWB_RSV_STATE_O_MODIFIED,
124 UWB_RSV_STATE_O_ESTABLISHED,
125 UWB_RSV_STATE_T_ACCEPTED,
126 UWB_RSV_STATE_T_DENIED,
127 UWB_RSV_STATE_T_PENDING,
128
129 UWB_RSV_STATE_LAST,
130};
131
132enum uwb_rsv_target_type {
133 UWB_RSV_TARGET_DEV,
134 UWB_RSV_TARGET_DEVADDR,
135};
136
137/**
138 * struct uwb_rsv_target - the target of a reservation.
139 *
140 * Reservations unicast and targeted at a single device
141 * (UWB_RSV_TARGET_DEV); or (e.g., in the case of WUSB) targeted at a
142 * specific (private) DevAddr (UWB_RSV_TARGET_DEVADDR).
143 */
144struct uwb_rsv_target {
145 enum uwb_rsv_target_type type;
146 union {
147 struct uwb_dev *dev;
148 struct uwb_dev_addr devaddr;
149 };
150};
151
152/*
153 * Number of streams reserved for reservations targeted at DevAddrs.
154 */
155#define UWB_NUM_GLOBAL_STREAMS 1
156
157typedef void (*uwb_rsv_cb_f)(struct uwb_rsv *rsv);
158
159/**
160 * struct uwb_rsv - a DRP reservation
161 *
162 * Data structure management:
163 *
164 * @rc: the radio controller this reservation is for
165 * (as target or owner)
166 * @rc_node: a list node for the RC
167 * @pal_node: a list node for the PAL
168 *
169 * Owner and target parameters:
170 *
171 * @owner: the UWB device owning this reservation
172 * @target: the target UWB device
173 * @type: reservation type
174 *
175 * Owner parameters:
176 *
177 * @max_mas: maximum number of MAS
178 * @min_mas: minimum number of MAS
179 * @sparsity: owner selected sparsity
180 * @is_multicast: true iff multicast
181 *
182 * @callback: callback function when the reservation completes
183 * @pal_priv: private data for the PAL making the reservation
184 *
185 * Reservation status:
186 *
187 * @status: negotiation status
188 * @stream: stream index allocated for this reservation
189 * @mas: reserved MAS
190 * @drp_ie: the DRP IE
191 * @ie_valid: true iff the DRP IE matches the reservation parameters
192 *
193 * DRP reservations are uniquely identified by the owner, target and
194 * stream index. However, when using a DevAddr as a target (e.g., for
195 * a WUSB cluster reservation) the responses may be received from
196 * devices with different DevAddrs. In this case, reservations are
197 * uniquely identified by just the stream index. A number of stream
198 * indexes (UWB_NUM_GLOBAL_STREAMS) are reserved for this.
199 */
200struct uwb_rsv {
201 struct uwb_rc *rc;
202 struct list_head rc_node;
203 struct list_head pal_node;
204
205 struct uwb_dev *owner;
206 struct uwb_rsv_target target;
207 enum uwb_drp_type type;
208 int max_mas;
209 int min_mas;
210 int sparsity;
211 bool is_multicast;
212
213 uwb_rsv_cb_f callback;
214 void *pal_priv;
215
216 enum uwb_rsv_state state;
217 u8 stream;
218 struct uwb_mas_bm mas;
219 struct uwb_ie_drp *drp_ie;
220 bool ie_valid;
221 struct timer_list timer;
222 bool expired;
223};
224
225static const
226struct uwb_mas_bm uwb_mas_bm_zero = { .bm = { 0 } };
227
228static inline void uwb_mas_bm_copy_le(void *dst, const struct uwb_mas_bm *mas)
229{
230 bitmap_copy_le(dst, mas->bm, UWB_NUM_MAS);
231}
232
233/**
234 * struct uwb_drp_avail - a radio controller's view of MAS usage
235 * @global: MAS unused by neighbors (excluding reservations targeted
236 * or owned by the local radio controller) or the beacon period
237 * @local: MAS unused by local established reservations
238 * @pending: MAS unused by local pending reservations
239 * @ie: DRP Availability IE to be included in the beacon
240 * @ie_valid: true iff @ie is valid and does not need to be regenerated from
241 * @global and @local
242 *
243 * Each radio controller maintains a view of MAS usage or
244 * availability. MAS available for a new reservation are determined
245 * from the intersection of @global, @local, and @pending.
246 *
247 * The radio controller must transmit a DRP Availability IE that's the
248 * intersection of @global and @local.
249 *
250 * A set bit indicates the MAS is unused and available.
251 *
252 * rc->rsvs_mutex should be held before accessing this data structure.
253 *
254 * [ECMA-368] section 17.4.3.
255 */
256struct uwb_drp_avail {
257 DECLARE_BITMAP(global, UWB_NUM_MAS);
258 DECLARE_BITMAP(local, UWB_NUM_MAS);
259 DECLARE_BITMAP(pending, UWB_NUM_MAS);
260 struct uwb_ie_drp_avail ie;
261 bool ie_valid;
262};
263
264
265const char *uwb_rsv_state_str(enum uwb_rsv_state state);
266const char *uwb_rsv_type_str(enum uwb_drp_type type);
267
268struct uwb_rsv *uwb_rsv_create(struct uwb_rc *rc, uwb_rsv_cb_f cb,
269 void *pal_priv);
270void uwb_rsv_destroy(struct uwb_rsv *rsv);
271
272int uwb_rsv_establish(struct uwb_rsv *rsv);
273int uwb_rsv_modify(struct uwb_rsv *rsv,
274 int max_mas, int min_mas, int sparsity);
275void uwb_rsv_terminate(struct uwb_rsv *rsv);
276
277void uwb_rsv_accept(struct uwb_rsv *rsv, uwb_rsv_cb_f cb, void *pal_priv);
278
279/**
280 * Radio Control Interface instance
281 *
282 *
283 * Life cycle rules: those of the UWB Device.
284 *
285 * @index: an index number for this radio controller, as used in the
286 * device name.
287 * @version: version of protocol supported by this device
288 * @priv: Backend implementation; rw with uwb_dev.dev.sem taken.
289 * @cmd: Backend implementation to execute commands; rw and call
290 * only with uwb_dev.dev.sem taken.
291 * @reset: Hardware reset of radio controller and any PAL controllers.
292 * @filter: Backend implementation to manipulate data to and from device
293 * to be compliant to specification assumed by driver (WHCI
294 * 0.95).
295 *
296 * uwb_dev.dev.mutex is used to execute commands and update
297 * the corresponding structures; can't use a spinlock
298 * because rc->cmd() can sleep.
299 * @ies: This is a dynamically allocated array caching the
300 * IEs (settable by the host) that the beacon of this
301 * radio controller is currently sending.
302 *
303 * In reality, we store here the full command we set to
304 * the radio controller (which is basically a command
305 * prefix followed by all the IEs the beacon currently
306 * contains). This way we don't have to realloc and
307 * memcpy when setting it.
308 *
309 * We set this up in uwb_rc_ie_setup(), where we alloc
310 * this struct, call get_ie() [so we know which IEs are
311 * currently being sent, if any].
312 *
313 * @ies_capacity:Amount of space (in bytes) allocated in @ies. The
314 * amount used is given by sizeof(*ies) plus ies->wIELength
315 * (which is a little endian quantity all the time).
316 * @ies_mutex: protect the IE cache
317 * @dbg: information for the debug interface
318 */
319struct uwb_rc {
320 struct uwb_dev uwb_dev;
321 int index;
322 u16 version;
323
324 struct module *owner;
325 void *priv;
326 int (*start)(struct uwb_rc *rc);
327 void (*stop)(struct uwb_rc *rc);
328 int (*cmd)(struct uwb_rc *, const struct uwb_rccb *, size_t);
329 int (*reset)(struct uwb_rc *rc);
330 int (*filter_cmd)(struct uwb_rc *, struct uwb_rccb **, size_t *);
331 int (*filter_event)(struct uwb_rc *, struct uwb_rceb **, const size_t,
332 size_t *, size_t *);
333
334 spinlock_t neh_lock; /* protects neh_* and ctx_* */
335 struct list_head neh_list; /* Open NE handles */
336 unsigned long ctx_bm[UWB_RC_CTX_MAX / 8 / sizeof(unsigned long)];
337 u8 ctx_roll;
338
339 int beaconing; /* Beaconing state [channel number] */
340 int scanning;
341 enum uwb_scan_type scan_type:3;
342 unsigned ready:1;
343 struct uwb_notifs_chain notifs_chain;
344
345 struct uwb_drp_avail drp_avail;
346 struct list_head reservations;
347 struct mutex rsvs_mutex;
348 struct workqueue_struct *rsv_workq;
349 struct work_struct rsv_update_work;
350
351 struct mutex ies_mutex;
352 struct uwb_rc_cmd_set_ie *ies;
353 size_t ies_capacity;
354
355 spinlock_t pal_lock;
356 struct list_head pals;
357
358 struct uwb_dbg *dbg;
359};
360
361
362/**
363 * struct uwb_pal - a UWB PAL
364 * @name: descriptive name for this PAL (wushc, wlp, etc.).
365 * @device: a device for the PAL. Used to link the PAL and the radio
366 * controller in sysfs.
367 * @new_rsv: called when a peer requests a reservation (may be NULL if
368 * the PAL cannot accept reservation requests).
369 *
370 * A Protocol Adaptation Layer (PAL) is a user of the WiMedia UWB
371 * radio platform (e.g., WUSB, WLP or Bluetooth UWB AMP).
372 *
373 * The PALs using a radio controller must register themselves to
374 * permit the UWB stack to coordinate usage of the radio between the
375 * various PALs or to allow PALs to respond to certain requests from
376 * peers.
377 *
378 * A struct uwb_pal should be embedded in a containing structure
379 * belonging to the PAL and initialized with uwb_pal_init()). Fields
380 * should be set appropriately by the PAL before registering the PAL
381 * with uwb_pal_register().
382 */
383struct uwb_pal {
384 struct list_head node;
385 const char *name;
386 struct device *device;
387 void (*new_rsv)(struct uwb_rsv *rsv);
388};
389
390void uwb_pal_init(struct uwb_pal *pal);
391int uwb_pal_register(struct uwb_rc *rc, struct uwb_pal *pal);
392void uwb_pal_unregister(struct uwb_rc *rc, struct uwb_pal *pal);
393
394/*
395 * General public API
396 *
397 * This API can be used by UWB device drivers or by those implementing
398 * UWB Radio Controllers
399 */
400struct uwb_dev *uwb_dev_get_by_devaddr(struct uwb_rc *rc,
401 const struct uwb_dev_addr *devaddr);
402struct uwb_dev *uwb_dev_get_by_rc(struct uwb_dev *, struct uwb_rc *);
/* Take a reference on @uwb_dev (get_device() on its embedded struct device). */
static inline void uwb_dev_get(struct uwb_dev *uwb_dev)
{
	get_device(&uwb_dev->dev);
}
/* Drop a reference on @uwb_dev taken with uwb_dev_get(). */
static inline void uwb_dev_put(struct uwb_dev *uwb_dev)
{
	put_device(&uwb_dev->dev);
}
411struct uwb_dev *uwb_dev_try_get(struct uwb_rc *rc, struct uwb_dev *uwb_dev);
412
413/**
414 * Callback function for 'uwb_{dev,rc}_foreach()'.
415 *
416 * @dev: Linux device instance
417 * 'uwb_dev = container_of(dev, struct uwb_dev, dev)'
418 * @priv: Data passed by the caller to 'uwb_{dev,rc}_foreach()'.
419 *
420 * @returns: 0 to continue the iterations, any other val to stop
421 * iterating and return the value to the caller of
422 * _foreach().
423 */
424typedef int (*uwb_dev_for_each_f)(struct device *dev, void *priv);
425int uwb_dev_for_each(struct uwb_rc *rc, uwb_dev_for_each_f func, void *priv);
426
427struct uwb_rc *uwb_rc_alloc(void);
428struct uwb_rc *uwb_rc_get_by_dev(const struct uwb_dev_addr *);
429struct uwb_rc *uwb_rc_get_by_grandpa(const struct device *);
430void uwb_rc_put(struct uwb_rc *rc);
431
432typedef void (*uwb_rc_cmd_cb_f)(struct uwb_rc *rc, void *arg,
433 struct uwb_rceb *reply, ssize_t reply_size);
434
435int uwb_rc_cmd_async(struct uwb_rc *rc, const char *cmd_name,
436 struct uwb_rccb *cmd, size_t cmd_size,
437 u8 expected_type, u16 expected_event,
438 uwb_rc_cmd_cb_f cb, void *arg);
439ssize_t uwb_rc_cmd(struct uwb_rc *rc, const char *cmd_name,
440 struct uwb_rccb *cmd, size_t cmd_size,
441 struct uwb_rceb *reply, size_t reply_size);
442ssize_t uwb_rc_vcmd(struct uwb_rc *rc, const char *cmd_name,
443 struct uwb_rccb *cmd, size_t cmd_size,
444 u8 expected_type, u16 expected_event,
445 struct uwb_rceb **preply);
446ssize_t uwb_rc_get_ie(struct uwb_rc *, struct uwb_rc_evt_get_ie **);
447int uwb_bg_joined(struct uwb_rc *rc);
448
449size_t __uwb_addr_print(char *, size_t, const unsigned char *, int);
450
451int uwb_rc_dev_addr_set(struct uwb_rc *, const struct uwb_dev_addr *);
452int uwb_rc_dev_addr_get(struct uwb_rc *, struct uwb_dev_addr *);
453int uwb_rc_mac_addr_set(struct uwb_rc *, const struct uwb_mac_addr *);
454int uwb_rc_mac_addr_get(struct uwb_rc *, struct uwb_mac_addr *);
455int __uwb_mac_addr_assigned_check(struct device *, void *);
456int __uwb_dev_addr_assigned_check(struct device *, void *);
457
458/* Print in @buf a pretty repr of @addr */
459static inline size_t uwb_dev_addr_print(char *buf, size_t buf_size,
460 const struct uwb_dev_addr *addr)
461{
462 return __uwb_addr_print(buf, buf_size, addr->data, 0);
463}
464
465/* Print in @buf a pretty repr of @addr */
466static inline size_t uwb_mac_addr_print(char *buf, size_t buf_size,
467 const struct uwb_mac_addr *addr)
468{
469 return __uwb_addr_print(buf, buf_size, addr->data, 1);
470}
471
472/* @returns 0 if device addresses @addr2 and @addr1 are equal */
473static inline int uwb_dev_addr_cmp(const struct uwb_dev_addr *addr1,
474 const struct uwb_dev_addr *addr2)
475{
476 return memcmp(addr1, addr2, sizeof(*addr1));
477}
478
479/* @returns 0 if MAC addresses @addr2 and @addr1 are equal */
480static inline int uwb_mac_addr_cmp(const struct uwb_mac_addr *addr1,
481 const struct uwb_mac_addr *addr2)
482{
483 return memcmp(addr1, addr2, sizeof(*addr1));
484}
485
486/* @returns !0 if a MAC @addr is a broadcast address */
487static inline int uwb_mac_addr_bcast(const struct uwb_mac_addr *addr)
488{
489 struct uwb_mac_addr bcast = {
490 .data = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }
491 };
492 return !uwb_mac_addr_cmp(addr, &bcast);
493}
494
495/* @returns !0 if a MAC @addr is all zeroes*/
496static inline int uwb_mac_addr_unset(const struct uwb_mac_addr *addr)
497{
498 struct uwb_mac_addr unset = {
499 .data = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }
500 };
501 return !uwb_mac_addr_cmp(addr, &unset);
502}
503
504/* @returns !0 if the address is in use. */
505static inline unsigned __uwb_dev_addr_assigned(struct uwb_rc *rc,
506 struct uwb_dev_addr *addr)
507{
508 return uwb_dev_for_each(rc, __uwb_dev_addr_assigned_check, addr);
509}
510
511/*
512 * UWB Radio Controller API
513 *
514 * This API is used (in addition to the general API) to implement UWB
515 * Radio Controllers.
516 */
517void uwb_rc_init(struct uwb_rc *);
518int uwb_rc_add(struct uwb_rc *, struct device *dev, void *rc_priv);
519void uwb_rc_rm(struct uwb_rc *);
520void uwb_rc_neh_grok(struct uwb_rc *, void *, size_t);
521void uwb_rc_neh_error(struct uwb_rc *, int);
522void uwb_rc_reset_all(struct uwb_rc *rc);
523
/**
 * uwb_rsv_is_owner - is the owner of this reservation the RC?
 * @rsv: the reservation
 *
 * True when the reservation's owner is the radio controller's own
 * uwb_dev, i.e. the local side established the reservation.
 */
static inline bool uwb_rsv_is_owner(struct uwb_rsv *rsv)
{
	return rsv->owner == &rsv->rc->uwb_dev;
}
532
533/**
534 * Events generated by UWB that can be passed to any listeners
535 *
536 * Higher layers can register callback functions with the radio
537 * controller using uwb_notifs_register(). The radio controller
538 * maintains a list of all registered handlers and will notify all
539 * nodes when an event occurs.
540 */
541enum uwb_notifs {
542 UWB_NOTIF_BG_JOIN = 0, /* radio controller joined a beacon group */
543 UWB_NOTIF_BG_LEAVE = 1, /* radio controller left a beacon group */
544 UWB_NOTIF_ONAIR,
545 UWB_NOTIF_OFFAIR,
546};
547
548/* Callback function registered with UWB */
549struct uwb_notifs_handler {
550 struct list_head list_node;
551 void (*cb)(void *, struct uwb_dev *, enum uwb_notifs);
552 void *data;
553};
554
555int uwb_notifs_register(struct uwb_rc *, struct uwb_notifs_handler *);
556int uwb_notifs_deregister(struct uwb_rc *, struct uwb_notifs_handler *);
557
558
559/**
560 * UWB radio controller Event Size Entry (for creating entry tables)
561 *
562 * WUSB and WHCI define events and notifications, and they might have
563 * fixed or variable size.
564 *
565 * Each event/notification has a size which is not necessarily known
566 * in advance based on the event code. As well, vendor specific
567 * events/notifications will have a size impossible to determine
568 * unless we know about the device's specific details.
569 *
570 * It was way too smart of the spec writers not to think that it would
571 * be impossible for a generic driver to skip over vendor specific
572 * events/notifications if there are no LENGTH fields in the HEADER of
573 * each message...the transaction size cannot be counted on as the
574 * spec does not forbid to pack more than one event in a single
575 * transaction.
576 *
577 * Thus, we guess sizes with tables (or for events, when you know the
578 * size ahead of time you can use uwb_rc_neh_extra_size*()). We
579 * register tables with the known events and their sizes, and then we
580 * traverse those tables. For those with variable length, we provide a
581 * way to lookup the size inside the event/notification's
582 * payload. This allows device-specific event size tables to be
583 * registered.
584 *
585 * @size: Size of the payload
586 *
587 * @offset: if != 0, at offset @offset-1 starts a field with a length
588 * that has to be added to @size. The format of the field is
589 * given by @type.
590 *
591 * @type: Type and length of the offset field. Most common is LE 16
592 * bits (that's why that is zero); others are there mostly to
593 * cover for bugs and weirdos.
594 */
595struct uwb_est_entry {
596 size_t size;
597 unsigned offset;
598 enum { UWB_EST_16 = 0, UWB_EST_8 = 1 } type;
599};
600
601int uwb_est_register(u8 type, u8 code_high, u16 vendor, u16 product,
602 const struct uwb_est_entry *, size_t entries);
603int uwb_est_unregister(u8 type, u8 code_high, u16 vendor, u16 product,
604 const struct uwb_est_entry *, size_t entries);
605ssize_t uwb_est_find_size(struct uwb_rc *rc, const struct uwb_rceb *rceb,
606 size_t len);
607
608/* -- Misc */
609
610enum {
611 EDC_MAX_ERRORS = 10,
612 EDC_ERROR_TIMEFRAME = HZ,
613};
614
/* error density counter */
struct edc {
	unsigned long timestart;	/* jiffies at start of current timeframe */
	u16 errorcount;			/* errors seen since @timestart */
};
620
621static inline
622void edc_init(struct edc *edc)
623{
624 edc->timestart = jiffies;
625}
626
627/* Called when an error occured.
628 * This is way to determine if the number of acceptable errors per time
629 * period has been exceeded. It is not accurate as there are cases in which
630 * this scheme will not work, for example if there are periodic occurences
631 * of errors that straddle updates to the start time. This scheme is
632 * sufficient for our usage.
633 *
634 * @returns 1 if maximum acceptable errors per timeframe has been exceeded.
635 */
636static inline int edc_inc(struct edc *err_hist, u16 max_err, u16 timeframe)
637{
638 unsigned long now;
639
640 now = jiffies;
641 if (now - err_hist->timestart > timeframe) {
642 err_hist->errorcount = 1;
643 err_hist->timestart = now;
644 } else if (++err_hist->errorcount > max_err) {
645 err_hist->errorcount = 0;
646 err_hist->timestart = now;
647 return 1;
648 }
649 return 0;
650}
651
652
653/* Information Element handling */
654
655/* For representing the state of writing to a buffer when iterating */
656struct uwb_buf_ctx {
657 char *buf;
658 size_t bytes, size;
659};
660
661typedef int (*uwb_ie_f)(struct uwb_dev *, const struct uwb_ie_hdr *,
662 size_t, void *);
663struct uwb_ie_hdr *uwb_ie_next(void **ptr, size_t *len);
664ssize_t uwb_ie_for_each(struct uwb_dev *uwb_dev, uwb_ie_f fn, void *data,
665 const void *buf, size_t size);
666int uwb_ie_dump_hex(struct uwb_dev *, const struct uwb_ie_hdr *,
667 size_t, void *);
668int uwb_rc_set_ie(struct uwb_rc *, struct uwb_rc_cmd_set_ie *);
669struct uwb_ie_hdr *uwb_ie_next(void **ptr, size_t *len);
670
671
672/*
673 * Transmission statistics
674 *
675 * UWB uses LQI and RSSI (one byte values) for reporting radio signal
676 * strength and line quality indication. We do quick and dirty
677 * averages of those. They are signed values, btw.
678 *
679 * For 8 bit quantities, we keep the min, the max, an accumulator
680 * (@sigma) and a # of samples. When @samples gets to 255, we compute
681 * the average (@sigma / @samples), place it in @sigma and reset
682 * @samples to 1 (so we use it as the first sample).
683 *
684 * Now, statistically speaking, probably I am kicking the kidneys of
685 * some books I have in my shelves collecting dust, but I just want to
686 * get an approx, not the Nobel.
687 *
688 * LOCKING: there is no locking per se, but we try to keep a lockless
689 * schema. Only _add_samples() modifies the values--as long as you
690 * have other locking on top that makes sure that no two calls of
691 * _add_sample() happen at the same time, then we are fine. Now, for
692 * resetting the values we just set @samples to 0 and that makes the
693 * next _add_sample() to start with defaults. Reading the values in
694 * _show() currently can race, so you need to make sure the calls are
695 * under the same lock that protects calls to _add_sample(). FIXME:
696 * currently unlocked (It is not ultraprecise but does the trick. Bite
697 * me).
698 */
struct stats {
	s8 min, max;		/* extremes of the samples seen so far */
	s16 sigma;		/* running accumulator (see LOCKING note above) */
	atomic_t samples;	/* number of samples folded into @sigma */
};
704
/*
 * Reset @stats: setting @samples to 0 makes the next
 * stats_add_sample() reinitialize min/max/sigma; the wmb() publishes
 * the reset before any subsequent writes.
 */
static inline
void stats_init(struct stats *stats)
{
	atomic_set(&stats->samples, 0);
	wmb();
}
711
/*
 * Fold one signal sample into @stats.
 *
 * Lockless by design: only _add_sample() writes the values, so the
 * caller must ensure no two calls run concurrently (see the LOCKING
 * note above).  When the sample count passes 255 the accumulated
 * @sigma is collapsed to its average and reused as the first sample
 * of the next round.
 */
static inline
void stats_add_sample(struct stats *stats, s8 sample)
{
	s8 min, max;
	s16 sigma;
	unsigned samples = atomic_read(&stats->samples);
	if (samples == 0) {	/* it was zero before, so we initialize */
		min = 127;
		max = -128;
		sigma = 0;
	} else {
		min = stats->min;
		max = stats->max;
		sigma = stats->sigma;
	}

	if (sample < min)	/* compute new values */
		min = sample;
	else if (sample > max)
		max = sample;
	sigma += sample;

	stats->min = min;	/* commit */
	stats->max = max;
	stats->sigma = sigma;
	if (atomic_add_return(1, &stats->samples) > 255) {
		/* wrapped around! reset */
		stats->sigma = sigma / 256;
		atomic_set(&stats->samples, 1);
	}
}
743
744static inline ssize_t stats_show(struct stats *stats, char *buf)
745{
746 int min, max, avg;
747 int samples = atomic_read(&stats->samples);
748 if (samples == 0)
749 min = max = avg = 0;
750 else {
751 min = stats->min;
752 max = stats->max;
753 avg = stats->sigma / samples;
754 }
755 return scnprintf(buf, PAGE_SIZE, "%d %d %d\n", min, max, avg);
756}
757
/*
 * Reset the statistics; @buf is ignored and @size is returned so the
 * whole write is treated as consumed (sysfs store convention).
 */
static inline ssize_t stats_store(struct stats *stats, const char *buf,
				  size_t size)
{
	stats_init(stats);
	return size;
}
764
765#endif /* #ifndef __LINUX__UWB_H__ */
diff --git a/include/linux/uwb/debug-cmd.h b/include/linux/uwb/debug-cmd.h
new file mode 100644
index 000000000000..1141f41bab5c
--- /dev/null
+++ b/include/linux/uwb/debug-cmd.h
@@ -0,0 +1,57 @@
1/*
2 * Ultra Wide Band
3 * Debug interface commands
4 *
5 * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19#ifndef __LINUX__UWB__DEBUG_CMD_H__
20#define __LINUX__UWB__DEBUG_CMD_H__
21
22#include <linux/types.h>
23
24/*
25 * Debug interface commands
26 *
27 * UWB_DBG_CMD_RSV_ESTABLISH: Establish a new unicast reservation.
28 *
29 * UWB_DBG_CMD_RSV_TERMINATE: Terminate the Nth reservation.
30 */
31
32enum uwb_dbg_cmd_type {
33 UWB_DBG_CMD_RSV_ESTABLISH = 1,
34 UWB_DBG_CMD_RSV_TERMINATE = 2,
35};
36
/*
 * Parameters for UWB_DBG_CMD_RSV_ESTABLISH.
 *
 * NOTE(review): the 6-byte @target suggests a MAC address, and
 * @max_mas/@min_mas look like Medium Access Slot counts — confirm
 * against the debug interface implementation.  The layout leaves
 * implicit padding after @type (u16 members after an odd number of
 * u8s); this is user-visible ABI, so fields must not be reordered.
 */
struct uwb_dbg_cmd_rsv_establish {
	__u8 target[6];
	__u8 type;
	__u16 max_mas;
	__u16 min_mas;
	__u8 sparsity;
};
44
45struct uwb_dbg_cmd_rsv_terminate {
46 int index;
47};
48
49struct uwb_dbg_cmd {
50 __u32 type;
51 union {
52 struct uwb_dbg_cmd_rsv_establish rsv_establish;
53 struct uwb_dbg_cmd_rsv_terminate rsv_terminate;
54 };
55};
56
57#endif /* #ifndef __LINUX__UWB__DEBUG_CMD_H__ */
diff --git a/include/linux/uwb/debug.h b/include/linux/uwb/debug.h
new file mode 100644
index 000000000000..a86a73fe303f
--- /dev/null
+++ b/include/linux/uwb/debug.h
@@ -0,0 +1,82 @@
1/*
2 * Ultra Wide Band
3 * Debug Support
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * FIXME: doc
24 * Invoke like:
25 *
26 * #define D_LOCAL 4
27 * #include <linux/uwb/debug.h>
28 *
29 * At the end of your include files.
30 */
31#include <linux/types.h>
32
33struct device;
34extern void dump_bytes(struct device *dev, const void *_buf, size_t rsize);
35
/* Master debug switch; !0 enables, 0 disables */
#define D_MASTER (!0)

/* Local (per-file) debug switch; #define before #including */
#ifndef D_LOCAL
#define D_LOCAL 0
#endif

/*
 * Undefine first: this header is deliberately unguarded so it can be
 * re-included by each file with its own D_LOCAL level (see the header
 * comment above).
 */
#undef __d_printf
#undef d_fnstart
#undef d_fnend
#undef d_printf
#undef d_dump

/*
 * Core debug printk: emits only when the master switch is on and
 * D_LOCAL >= @l.  Prefixes the driver/device name when @_dev looks
 * like a valid pointer (values below 4096 are flagged as corrupt).
 * The "while (0 && _dev)" tail keeps @_dev referenced without ever
 * evaluating the loop again.
 */
#define __d_printf(l, _tag, _dev, f, a...)				\
do {									\
	struct device *__dev = (_dev);					\
	if (D_MASTER && D_LOCAL >= (l)) {				\
		char __head[64] = "";					\
		if (_dev != NULL) {					\
			if ((unsigned long)__dev < 4096)		\
				printk(KERN_ERR "E: Corrupt dev %p\n",	\
					__dev);				\
			else						\
				snprintf(__head, sizeof(__head),	\
					 "%s %s: ",			\
					 dev_driver_string(__dev),	\
					 __dev->bus_id);		\
		}							\
		printk(KERN_ERR "%s%s" _tag ": " f, __head,		\
			__func__, ## a);				\
	}								\
} while (0 && _dev)

/* Function entry/exit tracing at debug level @l. */
#define d_fnstart(l, _dev, f, a...)	\
	__d_printf(l, " FNSTART", _dev, f, ## a)
#define d_fnend(l, _dev, f, a...)	\
	__d_printf(l, " FNEND", _dev, f, ## a)
/* Plain debug message at level @l. */
#define d_printf(l, _dev, f, a...)	\
	__d_printf(l, "", _dev, f, ## a)
/* Hex-dump @size bytes at @ptr when level @l is enabled. */
#define d_dump(l, _dev, ptr, size)		\
do {						\
	struct device *__dev = _dev;		\
	if (D_MASTER && D_LOCAL >= (l))		\
		dump_bytes(__dev, ptr, size);	\
} while (0 && _dev)
/* Nonzero when debug level @l is enabled (guard for expensive setup). */
#define d_test(l) (D_MASTER && D_LOCAL >= (l))
diff --git a/include/linux/uwb/spec.h b/include/linux/uwb/spec.h
new file mode 100644
index 000000000000..198c15f8e251
--- /dev/null
+++ b/include/linux/uwb/spec.h
@@ -0,0 +1,727 @@
1/*
2 * Ultra Wide Band
3 * UWB Standard definitions
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * All these definitions are based on the ECMA-368 standard.
24 *
25 * Note all definitions are Little Endian in the wire, and we will
26 * convert them to host order before operating on the bitfields (that
27 * yes, we use extensively).
28 */
29
30#ifndef __LINUX__UWB_SPEC_H__
31#define __LINUX__UWB_SPEC_H__
32
33#include <linux/types.h>
34#include <linux/bitmap.h>
35
/*
 * NOTE(review): i1480 firmware build number.  This looks
 * device-specific (Intel i1480) rather than part of the ECMA-368
 * spec definitions this header contains — consider moving it next to
 * the i1480 driver.  TODO confirm intended location.
 */
#define i1480_FW 0x00000303
/* #define i1480_FW 0x00000302 */
38
39/**
40 * Number of Medium Access Slots in a superframe.
41 *
42 * UWB divides time in SuperFrames, each one divided in 256 pieces, or
43 * Medium Access Slots. See MBOA MAC[5.4.5] for details. The MAS is the
44 * basic bandwidth allocation unit in UWB.
45 */
46enum { UWB_NUM_MAS = 256 };
47
48/**
49 * Number of Zones in superframe.
50 *
51 * UWB divides the superframe into zones with numbering starting from BPST.
52 * See MBOA MAC[16.8.6]
53 */
54enum { UWB_NUM_ZONES = 16 };
55
56/*
57 * Number of MAS in a zone.
58 */
59#define UWB_MAS_PER_ZONE (UWB_NUM_MAS / UWB_NUM_ZONES)
60
61/*
62 * Number of streams per DRP reservation between a pair of devices.
63 *
64 * [ECMA-368] section 16.8.6.
65 */
66enum { UWB_NUM_STREAMS = 8 };
67
68/*
69 * mMasLength
70 *
71 * The length of a MAS in microseconds.
72 *
73 * [ECMA-368] section 17.16.
74 */
75enum { UWB_MAS_LENGTH_US = 256 };
76
77/*
78 * mBeaconSlotLength
79 *
80 * The length of the beacon slot in microseconds.
81 *
82 * [ECMA-368] section 17.16
83 */
84enum { UWB_BEACON_SLOT_LENGTH_US = 85 };
85
86/*
87 * mMaxLostBeacons
88 *
 * The number of beacons missing in consecutive superframes before a
 * device can be considered unreachable.
91 *
92 * [ECMA-368] section 17.16
93 */
94enum { UWB_MAX_LOST_BEACONS = 3 };
95
96/*
97 * Length of a superframe in microseconds.
98 */
99#define UWB_SUPERFRAME_LENGTH_US (UWB_MAS_LENGTH_US * UWB_NUM_MAS)
100
101/**
102 * UWB MAC address
103 *
104 * It is *imperative* that this struct is exactly 6 packed bytes (as
105 * it is also used to define headers sent down and up the wire/radio).
106 */
107struct uwb_mac_addr {
108 u8 data[6];
109} __attribute__((packed));
110
111
112/**
113 * UWB device address
114 *
115 * It is *imperative* that this struct is exactly 6 packed bytes (as
116 * it is also used to define headers sent down and up the wire/radio).
117 */
118struct uwb_dev_addr {
119 u8 data[2];
120} __attribute__((packed));
121
122
123/**
124 * Types of UWB addresses
125 *
126 * Order matters (by size).
127 */
128enum uwb_addr_type {
129 UWB_ADDR_DEV = 0,
130 UWB_ADDR_MAC = 1,
131};
132
133
134/** Size of a char buffer for printing a MAC/device address */
135enum { UWB_ADDR_STRSIZE = 32 };
136
137
138/** UWB WiMedia protocol IDs. */
139enum uwb_prid {
140 UWB_PRID_WLP_RESERVED = 0x0000,
141 UWB_PRID_WLP = 0x0001,
142 UWB_PRID_WUSB_BOT = 0x0010,
143 UWB_PRID_WUSB = 0x0010,
144 UWB_PRID_WUSB_TOP = 0x001F,
145};
146
147
148/** PHY Rate (MBOA MAC[7.8.12, Table 61]) */
149enum uwb_phy_rate {
150 UWB_PHY_RATE_53 = 0,
151 UWB_PHY_RATE_80,
152 UWB_PHY_RATE_106,
153 UWB_PHY_RATE_160,
154 UWB_PHY_RATE_200,
155 UWB_PHY_RATE_320,
156 UWB_PHY_RATE_400,
157 UWB_PHY_RATE_480,
158 UWB_PHY_RATE_INVALID
159};
160
161
162/**
163 * Different ways to scan (MBOA MAC[6.2.2, Table 8], WUSB[Table 8-78])
164 */
165enum uwb_scan_type {
166 UWB_SCAN_ONLY = 0,
167 UWB_SCAN_OUTSIDE_BP,
168 UWB_SCAN_WHILE_INACTIVE,
169 UWB_SCAN_DISABLED,
170 UWB_SCAN_ONLY_STARTTIME,
171 UWB_SCAN_TOP
172};
173
174
175/** ACK Policy types (MBOA MAC[7.2.1.3]) */
176enum uwb_ack_pol {
177 UWB_ACK_NO = 0,
178 UWB_ACK_INM = 1,
179 UWB_ACK_B = 2,
180 UWB_ACK_B_REQ = 3,
181};
182
183
/** DRP reservation types ([ECMA-368] table 106) */
185enum uwb_drp_type {
186 UWB_DRP_TYPE_ALIEN_BP = 0,
187 UWB_DRP_TYPE_HARD,
188 UWB_DRP_TYPE_SOFT,
189 UWB_DRP_TYPE_PRIVATE,
190 UWB_DRP_TYPE_PCA,
191};
192
193
194/** DRP Reason Codes ([ECMA-368] table 107) */
195enum uwb_drp_reason {
196 UWB_DRP_REASON_ACCEPTED = 0,
197 UWB_DRP_REASON_CONFLICT,
198 UWB_DRP_REASON_PENDING,
199 UWB_DRP_REASON_DENIED,
200 UWB_DRP_REASON_MODIFIED,
201};
202
203/**
204 * DRP Notification Reason Codes (WHCI 0.95 [3.1.4.9])
205 */
206enum uwb_drp_notif_reason {
207 UWB_DRP_NOTIF_DRP_IE_RCVD = 0,
208 UWB_DRP_NOTIF_CONFLICT,
209 UWB_DRP_NOTIF_TERMINATE,
210};
211
212
213/** Allocation of MAS slots in a DRP request MBOA MAC[7.8.7] */
214struct uwb_drp_alloc {
215 __le16 zone_bm;
216 __le16 mas_bm;
217} __attribute__((packed));
218
219
220/** General MAC Header format (ECMA-368[16.2]) */
221struct uwb_mac_frame_hdr {
222 __le16 Frame_Control;
223 struct uwb_dev_addr DestAddr;
224 struct uwb_dev_addr SrcAddr;
225 __le16 Sequence_Control;
226 __le16 Access_Information;
227} __attribute__((packed));
228
229
230/**
231 * uwb_beacon_frame - a beacon frame including MAC headers
232 *
 * [ECMA-368] section 16.3.
234 */
235struct uwb_beacon_frame {
236 struct uwb_mac_frame_hdr hdr;
237 struct uwb_mac_addr Device_Identifier; /* may be a NULL EUI-48 */
238 u8 Beacon_Slot_Number;
239 u8 Device_Control;
240 u8 IEData[];
241} __attribute__((packed));
242
243
244/** Information Element codes (MBOA MAC[T54]) */
245enum uwb_ie {
246 UWB_PCA_AVAILABILITY = 2,
247 UWB_IE_DRP_AVAILABILITY = 8,
248 UWB_IE_DRP = 9,
249 UWB_BP_SWITCH_IE = 11,
250 UWB_MAC_CAPABILITIES_IE = 12,
251 UWB_PHY_CAPABILITIES_IE = 13,
252 UWB_APP_SPEC_PROBE_IE = 15,
253 UWB_IDENTIFICATION_IE = 19,
254 UWB_MASTER_KEY_ID_IE = 20,
255 UWB_IE_WLP = 250, /* WiMedia Logical Link Control Protocol WLP 0.99 */
256 UWB_APP_SPEC_IE = 255,
257};
258
259
260/**
261 * Header common to all Information Elements (IEs)
262 */
263struct uwb_ie_hdr {
264 u8 element_id; /* enum uwb_ie */
265 u8 length;
266} __attribute__((packed));
267
268
269/** Dynamic Reservation Protocol IE (MBOA MAC[7.8.6]) */
270struct uwb_ie_drp {
271 struct uwb_ie_hdr hdr;
272 __le16 drp_control;
273 struct uwb_dev_addr dev_addr;
274 struct uwb_drp_alloc allocs[];
275} __attribute__((packed));
276
277static inline int uwb_ie_drp_type(struct uwb_ie_drp *ie)
278{
279 return (le16_to_cpu(ie->drp_control) >> 0) & 0x7;
280}
281
282static inline int uwb_ie_drp_stream_index(struct uwb_ie_drp *ie)
283{
284 return (le16_to_cpu(ie->drp_control) >> 3) & 0x7;
285}
286
287static inline int uwb_ie_drp_reason_code(struct uwb_ie_drp *ie)
288{
289 return (le16_to_cpu(ie->drp_control) >> 6) & 0x7;
290}
291
292static inline int uwb_ie_drp_status(struct uwb_ie_drp *ie)
293{
294 return (le16_to_cpu(ie->drp_control) >> 9) & 0x1;
295}
296
297static inline int uwb_ie_drp_owner(struct uwb_ie_drp *ie)
298{
299 return (le16_to_cpu(ie->drp_control) >> 10) & 0x1;
300}
301
302static inline int uwb_ie_drp_tiebreaker(struct uwb_ie_drp *ie)
303{
304 return (le16_to_cpu(ie->drp_control) >> 11) & 0x1;
305}
306
307static inline int uwb_ie_drp_unsafe(struct uwb_ie_drp *ie)
308{
309 return (le16_to_cpu(ie->drp_control) >> 12) & 0x1;
310}
311
312static inline void uwb_ie_drp_set_type(struct uwb_ie_drp *ie, enum uwb_drp_type type)
313{
314 u16 drp_control = le16_to_cpu(ie->drp_control);
315 drp_control = (drp_control & ~(0x7 << 0)) | (type << 0);
316 ie->drp_control = cpu_to_le16(drp_control);
317}
318
319static inline void uwb_ie_drp_set_stream_index(struct uwb_ie_drp *ie, int stream_index)
320{
321 u16 drp_control = le16_to_cpu(ie->drp_control);
322 drp_control = (drp_control & ~(0x7 << 3)) | (stream_index << 3);
323 ie->drp_control = cpu_to_le16(drp_control);
324}
325
326static inline void uwb_ie_drp_set_reason_code(struct uwb_ie_drp *ie,
327 enum uwb_drp_reason reason_code)
328{
329 u16 drp_control = le16_to_cpu(ie->drp_control);
330 drp_control = (ie->drp_control & ~(0x7 << 6)) | (reason_code << 6);
331 ie->drp_control = cpu_to_le16(drp_control);
332}
333
334static inline void uwb_ie_drp_set_status(struct uwb_ie_drp *ie, int status)
335{
336 u16 drp_control = le16_to_cpu(ie->drp_control);
337 drp_control = (drp_control & ~(0x1 << 9)) | (status << 9);
338 ie->drp_control = cpu_to_le16(drp_control);
339}
340
341static inline void uwb_ie_drp_set_owner(struct uwb_ie_drp *ie, int owner)
342{
343 u16 drp_control = le16_to_cpu(ie->drp_control);
344 drp_control = (drp_control & ~(0x1 << 10)) | (owner << 10);
345 ie->drp_control = cpu_to_le16(drp_control);
346}
347
348static inline void uwb_ie_drp_set_tiebreaker(struct uwb_ie_drp *ie, int tiebreaker)
349{
350 u16 drp_control = le16_to_cpu(ie->drp_control);
351 drp_control = (drp_control & ~(0x1 << 11)) | (tiebreaker << 11);
352 ie->drp_control = cpu_to_le16(drp_control);
353}
354
355static inline void uwb_ie_drp_set_unsafe(struct uwb_ie_drp *ie, int unsafe)
356{
357 u16 drp_control = le16_to_cpu(ie->drp_control);
358 drp_control = (drp_control & ~(0x1 << 12)) | (unsafe << 12);
359 ie->drp_control = cpu_to_le16(drp_control);
360}
361
362/** Dynamic Reservation Protocol IE (MBOA MAC[7.8.7]) */
363struct uwb_ie_drp_avail {
364 struct uwb_ie_hdr hdr;
365 DECLARE_BITMAP(bmp, UWB_NUM_MAS);
366} __attribute__((packed));
367
368/**
369 * The Vendor ID is set to an OUI that indicates the vendor of the device.
370 * ECMA-368 [16.8.10]
371 */
372struct uwb_vendor_id {
373 u8 data[3];
374} __attribute__((packed));
375
376/**
377 * The device type ID
378 * FIXME: clarify what this means
379 * ECMA-368 [16.8.10]
380 */
381struct uwb_device_type_id {
382 u8 data[3];
383} __attribute__((packed));
384
385
386/**
387 * UWB device information types
388 * ECMA-368 [16.8.10]
389 */
390enum uwb_dev_info_type {
391 UWB_DEV_INFO_VENDOR_ID = 0,
392 UWB_DEV_INFO_VENDOR_TYPE,
393 UWB_DEV_INFO_NAME,
394};
395
396/**
397 * UWB device information found in Identification IE
398 * ECMA-368 [16.8.10]
399 */
400struct uwb_dev_info {
401 u8 type; /* enum uwb_dev_info_type */
402 u8 length;
403 u8 data[];
404} __attribute__((packed));
405
406/**
407 * UWB Identification IE
408 * ECMA-368 [16.8.10]
409 */
410struct uwb_identification_ie {
411 struct uwb_ie_hdr hdr;
412 struct uwb_dev_info info[];
413} __attribute__((packed));
414
415/*
416 * UWB Radio Controller
417 *
418 * These definitions are common to the Radio Control layers as
419 * exported by the WUSB1.0 HWA and WHCI interfaces.
420 */
421
422/** Radio Control Command Block (WUSB1.0[Table 8-65] and WHCI 0.95) */
423struct uwb_rccb {
424 u8 bCommandType; /* enum hwa_cet */
425 __le16 wCommand; /* Command code */
426 u8 bCommandContext; /* Context ID */
427} __attribute__((packed));
428
429
430/** Radio Control Event Block (WUSB[table 8-66], WHCI 0.95) */
431struct uwb_rceb {
432 u8 bEventType; /* enum hwa_cet */
433 __le16 wEvent; /* Event code */
434 u8 bEventContext; /* Context ID */
435} __attribute__((packed));
436
437
438enum {
439 UWB_RC_CET_GENERAL = 0, /* General Command/Event type */
440 UWB_RC_CET_EX_TYPE_1 = 1, /* Extended Type 1 Command/Event type */
441};
442
443/* Commands to the radio controller */
444enum uwb_rc_cmd {
445 UWB_RC_CMD_CHANNEL_CHANGE = 16,
446 UWB_RC_CMD_DEV_ADDR_MGMT = 17, /* Device Address Management */
447 UWB_RC_CMD_GET_IE = 18, /* GET Information Elements */
448 UWB_RC_CMD_RESET = 19,
449 UWB_RC_CMD_SCAN = 20, /* Scan management */
450 UWB_RC_CMD_SET_BEACON_FILTER = 21,
451 UWB_RC_CMD_SET_DRP_IE = 22, /* Dynamic Reservation Protocol IEs */
452 UWB_RC_CMD_SET_IE = 23, /* Information Element management */
453 UWB_RC_CMD_SET_NOTIFICATION_FILTER = 24,
454 UWB_RC_CMD_SET_TX_POWER = 25,
455 UWB_RC_CMD_SLEEP = 26,
456 UWB_RC_CMD_START_BEACON = 27,
457 UWB_RC_CMD_STOP_BEACON = 28,
458 UWB_RC_CMD_BP_MERGE = 29,
459 UWB_RC_CMD_SEND_COMMAND_FRAME = 30,
460 UWB_RC_CMD_SET_ASIE_NOTIF = 31,
461};
462
463/* Notifications from the radio controller */
464enum uwb_rc_evt {
465 UWB_RC_EVT_IE_RCV = 0,
466 UWB_RC_EVT_BEACON = 1,
467 UWB_RC_EVT_BEACON_SIZE = 2,
468 UWB_RC_EVT_BPOIE_CHANGE = 3,
469 UWB_RC_EVT_BP_SLOT_CHANGE = 4,
470 UWB_RC_EVT_BP_SWITCH_IE_RCV = 5,
471 UWB_RC_EVT_DEV_ADDR_CONFLICT = 6,
472 UWB_RC_EVT_DRP_AVAIL = 7,
473 UWB_RC_EVT_DRP = 8,
474 UWB_RC_EVT_BP_SWITCH_STATUS = 9,
475 UWB_RC_EVT_CMD_FRAME_RCV = 10,
476 UWB_RC_EVT_CHANNEL_CHANGE_IE_RCV = 11,
477 /* Events (command responses) use the same code as the command */
478 UWB_RC_EVT_UNKNOWN_CMD_RCV = 65535,
479};
480
481enum uwb_rc_extended_type_1_cmd {
482 UWB_RC_SET_DAA_ENERGY_MASK = 32,
483 UWB_RC_SET_NOTIFICATION_FILTER_EX = 33,
484};
485
486enum uwb_rc_extended_type_1_evt {
487 UWB_RC_DAA_ENERGY_DETECTED = 0,
488};
489
490/* Radio Control Result Code. [WHCI] table 3-3. */
491enum {
492 UWB_RC_RES_SUCCESS = 0,
493 UWB_RC_RES_FAIL,
494 UWB_RC_RES_FAIL_HARDWARE,
495 UWB_RC_RES_FAIL_NO_SLOTS,
496 UWB_RC_RES_FAIL_BEACON_TOO_LARGE,
497 UWB_RC_RES_FAIL_INVALID_PARAMETER,
498 UWB_RC_RES_FAIL_UNSUPPORTED_PWR_LEVEL,
499 UWB_RC_RES_FAIL_INVALID_IE_DATA,
500 UWB_RC_RES_FAIL_BEACON_SIZE_EXCEEDED,
501 UWB_RC_RES_FAIL_CANCELLED,
502 UWB_RC_RES_FAIL_INVALID_STATE,
503 UWB_RC_RES_FAIL_INVALID_SIZE,
504 UWB_RC_RES_FAIL_ACK_NOT_RECEIVED,
505 UWB_RC_RES_FAIL_NO_MORE_ASIE_NOTIF,
506 UWB_RC_RES_FAIL_TIME_OUT = 255,
507};
508
509/* Confirm event. [WHCI] section 3.1.3.1 etc. */
510struct uwb_rc_evt_confirm {
511 struct uwb_rceb rceb;
512 u8 bResultCode;
513} __attribute__((packed));
514
515/* Device Address Management event. [WHCI] section 3.1.3.2. */
516struct uwb_rc_evt_dev_addr_mgmt {
517 struct uwb_rceb rceb;
518 u8 baAddr[6];
519 u8 bResultCode;
520} __attribute__((packed));
521
522
523/* Get IE Event. [WHCI] section 3.1.3.3. */
524struct uwb_rc_evt_get_ie {
525 struct uwb_rceb rceb;
526 __le16 wIELength;
527 u8 IEData[];
528} __attribute__((packed));
529
530/* Set DRP IE Event. [WHCI] section 3.1.3.7. */
531struct uwb_rc_evt_set_drp_ie {
532 struct uwb_rceb rceb;
533 __le16 wRemainingSpace;
534 u8 bResultCode;
535} __attribute__((packed));
536
537/* Set IE Event. [WHCI] section 3.1.3.8. */
538struct uwb_rc_evt_set_ie {
539 struct uwb_rceb rceb;
540 __le16 RemainingSpace;
541 u8 bResultCode;
542} __attribute__((packed));
543
544/* Scan command. [WHCI] 3.1.3.5. */
545struct uwb_rc_cmd_scan {
546 struct uwb_rccb rccb;
547 u8 bChannelNumber;
548 u8 bScanState;
549 __le16 wStartTime;
550} __attribute__((packed));
551
552/* Set DRP IE command. [WHCI] section 3.1.3.7. */
553struct uwb_rc_cmd_set_drp_ie {
554 struct uwb_rccb rccb;
555 __le16 wIELength;
556 struct uwb_ie_drp IEData[];
557} __attribute__((packed));
558
559/* Set IE command. [WHCI] section 3.1.3.8. */
560struct uwb_rc_cmd_set_ie {
561 struct uwb_rccb rccb;
562 __le16 wIELength;
563 u8 IEData[];
564} __attribute__((packed));
565
566/* Set DAA Energy Mask event. [WHCI 0.96] section 3.1.3.17. */
567struct uwb_rc_evt_set_daa_energy_mask {
568 struct uwb_rceb rceb;
569 __le16 wLength;
570 u8 result;
571} __attribute__((packed));
572
573/* Set Notification Filter Extended event. [WHCI 0.96] section 3.1.3.18. */
574struct uwb_rc_evt_set_notification_filter_ex {
575 struct uwb_rceb rceb;
576 __le16 wLength;
577 u8 result;
578} __attribute__((packed));
579
580/* IE Received notification. [WHCI] section 3.1.4.1. */
581struct uwb_rc_evt_ie_rcv {
582 struct uwb_rceb rceb;
583 struct uwb_dev_addr SrcAddr;
584 __le16 wIELength;
585 u8 IEData[];
586} __attribute__((packed));
587
/* Type of a received beacon. [WHCI] section 3.1.4.2. */
enum uwb_rc_beacon_type {
	UWB_RC_BEACON_TYPE_SCAN		= 0,
	UWB_RC_BEACON_TYPE_NEIGHBOR	= 1,
	UWB_RC_BEACON_TYPE_OL_ALIEN	= 2,
	UWB_RC_BEACON_TYPE_NOL_ALIEN	= 3,
};
595
596/* Beacon received notification. [WHCI] 3.1.4.2. */
597struct uwb_rc_evt_beacon {
598 struct uwb_rceb rceb;
599 u8 bChannelNumber;
600 u8 bBeaconType;
601 __le16 wBPSTOffset;
602 u8 bLQI;
603 u8 bRSSI;
604 __le16 wBeaconInfoLength;
605 u8 BeaconInfo[];
606} __attribute__((packed));
607
608
609/* Beacon Size Change notification. [WHCI] section 3.1.4.3 */
610struct uwb_rc_evt_beacon_size {
611 struct uwb_rceb rceb;
612 __le16 wNewBeaconSize;
613} __attribute__((packed));
614
615
616/* BPOIE Change notification. [WHCI] section 3.1.4.4. */
617struct uwb_rc_evt_bpoie_change {
618 struct uwb_rceb rceb;
619 __le16 wBPOIELength;
620 u8 BPOIE[];
621} __attribute__((packed));
622
623
624/* Beacon Slot Change notification. [WHCI] section 3.1.4.5. */
625struct uwb_rc_evt_bp_slot_change {
626 struct uwb_rceb rceb;
627 u8 slot_info;
628} __attribute__((packed));
629
630static inline int uwb_rc_evt_bp_slot_change_slot_num(
631 const struct uwb_rc_evt_bp_slot_change *evt)
632{
633 return evt->slot_info & 0x7f;
634}
635
636static inline int uwb_rc_evt_bp_slot_change_no_slot(
637 const struct uwb_rc_evt_bp_slot_change *evt)
638{
639 return (evt->slot_info & 0x80) >> 7;
640}
641
642/* BP Switch IE Received notification. [WHCI] section 3.1.4.6. */
643struct uwb_rc_evt_bp_switch_ie_rcv {
644 struct uwb_rceb rceb;
645 struct uwb_dev_addr wSrcAddr;
646 __le16 wIELength;
647 u8 IEData[];
648} __attribute__((packed));
649
650/* DevAddr Conflict notification. [WHCI] section 3.1.4.7. */
651struct uwb_rc_evt_dev_addr_conflict {
652 struct uwb_rceb rceb;
653} __attribute__((packed));
654
655/* DRP notification. [WHCI] section 3.1.4.9. */
656struct uwb_rc_evt_drp {
657 struct uwb_rceb rceb;
658 struct uwb_dev_addr src_addr;
659 u8 reason;
660 u8 beacon_slot_number;
661 __le16 ie_length;
662 u8 ie_data[];
663} __attribute__((packed));
664
665static inline enum uwb_drp_notif_reason uwb_rc_evt_drp_reason(struct uwb_rc_evt_drp *evt)
666{
667 return evt->reason & 0x0f;
668}
669
670
671/* DRP Availability Change notification. [WHCI] section 3.1.4.8. */
672struct uwb_rc_evt_drp_avail {
673 struct uwb_rceb rceb;
674 DECLARE_BITMAP(bmp, UWB_NUM_MAS);
675} __attribute__((packed));
676
677/* BP switch status notification. [WHCI] section 3.1.4.10. */
678struct uwb_rc_evt_bp_switch_status {
679 struct uwb_rceb rceb;
680 u8 status;
681 u8 slot_offset;
682 __le16 bpst_offset;
683 u8 move_countdown;
684} __attribute__((packed));
685
686/* Command Frame Received notification. [WHCI] section 3.1.4.11. */
687struct uwb_rc_evt_cmd_frame_rcv {
688 struct uwb_rceb rceb;
689 __le16 receive_time;
690 struct uwb_dev_addr wSrcAddr;
691 struct uwb_dev_addr wDstAddr;
692 __le16 control;
693 __le16 reserved;
694 __le16 dataLength;
695 u8 data[];
696} __attribute__((packed));
697
698/* Channel Change IE Received notification. [WHCI] section 3.1.4.12. */
699struct uwb_rc_evt_channel_change_ie_rcv {
700 struct uwb_rceb rceb;
701 struct uwb_dev_addr wSrcAddr;
702 __le16 wIELength;
703 u8 IEData[];
704} __attribute__((packed));
705
706/* DAA Energy Detected notification. [WHCI 0.96] section 3.1.4.14. */
707struct uwb_rc_evt_daa_energy_detected {
708 struct uwb_rceb rceb;
709 __le16 wLength;
710 u8 bandID;
711 u8 reserved;
712 u8 toneBmp[16];
713} __attribute__((packed));
714
715
716/**
717 * Radio Control Interface Class Descriptor
718 *
719 * WUSB 1.0 [8.6.1.2]
720 */
721struct uwb_rc_control_intf_class_desc {
722 u8 bLength;
723 u8 bDescriptorType;
724 __le16 bcdRCIVersion;
725} __attribute__((packed));
726
727#endif /* #ifndef __LINUX__UWB_SPEC_H__ */
diff --git a/include/linux/uwb/umc.h b/include/linux/uwb/umc.h
new file mode 100644
index 000000000000..36a39e34f8d7
--- /dev/null
+++ b/include/linux/uwb/umc.h
@@ -0,0 +1,194 @@
1/*
2 * UWB Multi-interface Controller support.
3 *
4 * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
5 *
6 * This file is released under the GPLv2
7 *
8 * UMC (UWB Multi-interface Controller) capabilities (e.g., radio
9 * controller, host controller) are presented as devices on the "umc"
10 * bus.
11 *
12 * The radio controller is not strictly a UMC capability but it's
13 * useful to present it as such.
14 *
15 * References:
16 *
17 * [WHCI] Wireless Host Controller Interface Specification for
18 * Certified Wireless Universal Serial Bus, revision 0.95.
19 *
20 * How this works is kind of convoluted but simple. The whci.ko driver
21 * loads when WHCI devices are detected. These WHCI devices expose
22 * many devices in the same PCI function (they couldn't have reused
23 * functions, no), so for each PCI function that exposes these many
24 * devices, whci creates a umc_dev [whci_probe() -> whci_add_cap()]
25 * with umc_device_create() and adds it to the bus with
26 * umc_device_register().
27 *
28 * umc_device_register() calls device_register() which will push the
29 * bus management code to load your UMC driver's something_probe()
30 * that you have registered for that capability code.
31 *
32 * Now when the WHCI device is removed, whci_remove() will go over
33 * each umc_dev assigned to each of the PCI function's capabilities
34 * and through whci_del_cap() call umc_device_unregister() each
35 * created umc_dev. Of course, if you are bound to the device, your
36 * driver's something_remove() will be called.
37 */
38
39#ifndef _LINUX_UWB_UMC_H_
40#define _LINUX_UWB_UMC_H_
41
42#include <linux/device.h>
43#include <linux/pci.h>
44
45/*
46 * UMC capability IDs.
47 *
48 * 0x00 is reserved so use it for the radio controller device.
49 *
50 * [WHCI] table 2-8
51 */
52#define UMC_CAP_ID_WHCI_RC 0x00 /* radio controller */
53#define UMC_CAP_ID_WHCI_WUSB_HC 0x01 /* WUSB host controller */
54
55/**
56 * struct umc_dev - UMC capability device
57 *
58 * @version: version of the specification this capability conforms to.
59 * @cap_id: capability ID.
60 * @bar: PCI Bar (64 bit) where the resource lies
61 * @resource: register space resource.
62 * @irq: interrupt line.
63 */
64struct umc_dev {
65 u16 version;
66 u8 cap_id;
67 u8 bar;
68 struct resource resource;
69 unsigned irq;
70 struct device dev;
71};
72
73#define to_umc_dev(d) container_of(d, struct umc_dev, dev)
74
75/**
76 * struct umc_driver - UMC capability driver
77 * @cap_id: supported capability ID.
78 * @match: driver specific capability matching function.
79 * @match_data: driver specific data for match() (e.g., a
80 * table of pci_device_id's if umc_match_pci_id() is used).
81 */
82struct umc_driver {
83 char *name;
84 u8 cap_id;
85 int (*match)(struct umc_driver *, struct umc_dev *);
86 const void *match_data;
87
88 int (*probe)(struct umc_dev *);
89 void (*remove)(struct umc_dev *);
90 int (*suspend)(struct umc_dev *, pm_message_t state);
91 int (*resume)(struct umc_dev *);
92
93 struct device_driver driver;
94};
95
96#define to_umc_driver(d) container_of(d, struct umc_driver, driver)
97
98extern struct bus_type umc_bus_type;
99
100struct umc_dev *umc_device_create(struct device *parent, int n);
101int __must_check umc_device_register(struct umc_dev *umc);
102void umc_device_unregister(struct umc_dev *umc);
103
104int __must_check __umc_driver_register(struct umc_driver *umc_drv,
105 struct module *mod,
106 const char *mod_name);
107
108/**
109 * umc_driver_register - register a UMC capabiltity driver.
110 * @umc_drv: pointer to the driver.
111 */
112static inline int __must_check umc_driver_register(struct umc_driver *umc_drv)
113{
114 return __umc_driver_register(umc_drv, THIS_MODULE, KBUILD_MODNAME);
115}
116void umc_driver_unregister(struct umc_driver *umc_drv);
117
118/*
119 * Utility function you can use to match (umc_driver->match) against a
120 * null-terminated array of 'struct pci_device_id' in
121 * umc_driver->match_data.
122 */
123int umc_match_pci_id(struct umc_driver *umc_drv, struct umc_dev *umc);
124
125/**
126 * umc_parent_pci_dev - return the UMC's parent PCI device or NULL if none
127 * @umc_dev: UMC device whose parent PCI device we are looking for
128 *
129 * DIRTY!!! DON'T RELY ON THIS
130 *
131 * FIXME: This is as dirty as it gets, but we need some way to check
132 * the correct type of umc_dev->parent (so that for example, we can
133 * cast to pci_dev). Casting to pci_dev is necesary because at some
134 * point we need to request resources from the device. Mapping is
135 * easily over come (ioremap and stuff are bus agnostic), but hooking
136 * up to some error handlers (such as pci error handlers) might need
137 * this.
138 *
139 * THIS might (probably will) be removed in the future, so don't count
140 * on it.
141 */
142static inline struct pci_dev *umc_parent_pci_dev(struct umc_dev *umc_dev)
143{
144 struct pci_dev *pci_dev = NULL;
145 if (umc_dev->dev.parent->bus == &pci_bus_type)
146 pci_dev = to_pci_dev(umc_dev->dev.parent);
147 return pci_dev;
148}
149
150/**
151 * umc_dev_get() - reference a UMC device.
152 * @umc_dev: Pointer to UMC device.
153 *
154 * NOTE: we are assuming in this whole scheme that the parent device
155 * is referenced at _probe() time and unreferenced at _remove()
156 * time by the parent's subsystem.
157 */
158static inline struct umc_dev *umc_dev_get(struct umc_dev *umc_dev)
159{
160 get_device(&umc_dev->dev);
161 return umc_dev;
162}
163
164/**
165 * umc_dev_put() - unreference a UMC device.
166 * @umc_dev: Pointer to UMC device.
167 */
168static inline void umc_dev_put(struct umc_dev *umc_dev)
169{
170 put_device(&umc_dev->dev);
171}
172
173/**
174 * umc_set_drvdata - set UMC device's driver data.
175 * @umc_dev: Pointer to UMC device.
176 * @data: Data to set.
177 */
178static inline void umc_set_drvdata(struct umc_dev *umc_dev, void *data)
179{
180 dev_set_drvdata(&umc_dev->dev, data);
181}
182
183/**
184 * umc_get_drvdata - recover UMC device's driver data.
185 * @umc_dev: Pointer to UMC device.
186 */
187static inline void *umc_get_drvdata(struct umc_dev *umc_dev)
188{
189 return dev_get_drvdata(&umc_dev->dev);
190}
191
192int umc_controller_reset(struct umc_dev *umc);
193
194#endif /* #ifndef _LINUX_UWB_UMC_H_ */
diff --git a/include/linux/uwb/whci.h b/include/linux/uwb/whci.h
new file mode 100644
index 000000000000..915ec23042d4
--- /dev/null
+++ b/include/linux/uwb/whci.h
@@ -0,0 +1,117 @@
1/*
2 * Wireless Host Controller Interface for Ultra-Wide-Band and Wireless USB
3 *
4 * Copyright (C) 2005-2006 Intel Corporation
5 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 * 02110-1301, USA.
20 *
21 *
22 *
23 * References:
24 * [WHCI] Wireless Host Controller Interface Specification for
25 * Certified Wireless Universal Serial Bus, revision 0.95.
26 */
27#ifndef _LINUX_UWB_WHCI_H_
28#define _LINUX_UWB_WHCI_H_
29
30#include <linux/pci.h>
31
32/*
33 * UWB interface capability registers (offsets from UWBBASE)
34 *
35 * [WHCI] section 2.2
36 */
37#define UWBCAPINFO 0x00 /* == UWBCAPDATA(0) */
38# define UWBCAPINFO_TO_N_CAPS(c) (((c) >> 0) & 0xFull)
39#define UWBCAPDATA(n) (8*(n))
40# define UWBCAPDATA_TO_VERSION(c) (((c) >> 32) & 0xFFFFull)
41# define UWBCAPDATA_TO_OFFSET(c) (((c) >> 18) & 0x3FFFull)
42# define UWBCAPDATA_TO_BAR(c) (((c) >> 16) & 0x3ull)
43# define UWBCAPDATA_TO_SIZE(c) ((((c) >> 8) & 0xFFull) * sizeof(u32))
44# define UWBCAPDATA_TO_CAP_ID(c) (((c) >> 0) & 0xFFull)
45
46/* Size of the WHCI capability data (including the RC capability) for
47 a device with n capabilities. */
48#define UWBCAPDATA_SIZE(n) (8 + 8*(n))
49
50
51/*
52 * URC registers (offsets from URCBASE)
53 *
54 * [WHCI] section 2.3
55 */
56#define URCCMD 0x00
57# define URCCMD_RESET (1 << 31) /* UMC Hardware reset */
58# define URCCMD_RS (1 << 30) /* Run/Stop */
59# define URCCMD_EARV (1 << 29) /* Event Address Register Valid */
60# define URCCMD_ACTIVE (1 << 15) /* Command is active */
61# define URCCMD_IWR (1 << 14) /* Interrupt When Ready */
62# define URCCMD_SIZE_MASK 0x00000fff /* Command size mask */
63#define URCSTS 0x04
64# define URCSTS_EPS (1 << 17) /* Event Processing Status */
65# define URCSTS_HALTED (1 << 16) /* RC halted */
66# define URCSTS_HSE (1 << 10) /* Host System Error...fried */
67# define URCSTS_ER (1 << 9) /* Event Ready */
68# define URCSTS_RCI (1 << 8) /* Ready for Command Interrupt */
69# define URCSTS_INT_MASK 0x00000700 /* URC interrupt sources */
70# define URCSTS_ISI 0x000000ff /* Interrupt Source Identification */
71#define URCINTR 0x08
72# define URCINTR_EN_ALL 0x000007ff /* Enable all interrupt sources */
73#define URCCMDADDR 0x10
74#define URCEVTADDR 0x18
75# define URCEVTADDR_OFFSET_MASK 0xfff /* Event pointer offset mask */
76
77
78/** Write 32 bit @value to little endian register at @addr */
79static inline
80void le_writel(u32 value, void __iomem *addr)
81{
82 iowrite32(value, addr);
83}
84
85
86/** Read from 32 bit little endian register at @addr */
87static inline
88u32 le_readl(void __iomem *addr)
89{
90 return ioread32(addr);
91}
92
93
94/** Write 64 bit @value to little endian register at @addr */
95static inline
96void le_writeq(u64 value, void __iomem *addr)
97{
98 iowrite32(value, addr);
99 iowrite32(value >> 32, addr + 4);
100}
101
102
103/** Read from 64 bit little endian register at @addr */
104static inline
105u64 le_readq(void __iomem *addr)
106{
107 u64 value;
108 value = ioread32(addr);
109 value |= (u64)ioread32(addr + 4) << 32;
110 return value;
111}
112
113extern int whci_wait_for(struct device *dev, u32 __iomem *reg,
114 u32 mask, u32 result,
115 unsigned long max_ms, const char *tag);
116
117#endif /* #ifndef _LINUX_UWB_WHCI_H_ */
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index d4b03034ee73..4669d7e72e75 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -315,6 +315,13 @@ struct v4l2_pix_format {
315/* see http://www.siliconimaging.com/RGB%20Bayer.htm */ 315/* see http://www.siliconimaging.com/RGB%20Bayer.htm */
316#define V4L2_PIX_FMT_SBGGR8 v4l2_fourcc('B', 'A', '8', '1') /* 8 BGBG.. GRGR.. */ 316#define V4L2_PIX_FMT_SBGGR8 v4l2_fourcc('B', 'A', '8', '1') /* 8 BGBG.. GRGR.. */
317#define V4L2_PIX_FMT_SGBRG8 v4l2_fourcc('G', 'B', 'R', 'G') /* 8 GBGB.. RGRG.. */ 317#define V4L2_PIX_FMT_SGBRG8 v4l2_fourcc('G', 'B', 'R', 'G') /* 8 GBGB.. RGRG.. */
318/*
319 * 10bit raw bayer, expanded to 16 bits
320 * xxxxrrrrrrrrrrxxxxgggggggggg xxxxggggggggggxxxxbbbbbbbbbb...
321 */
322#define V4L2_PIX_FMT_SGRBG10 v4l2_fourcc('B', 'A', '1', '0')
323/* 10bit raw bayer DPCM compressed to 8 bits */
324#define V4L2_PIX_FMT_SGRBG10DPCM8 v4l2_fourcc('B', 'D', '1', '0')
318#define V4L2_PIX_FMT_SBGGR16 v4l2_fourcc('B', 'Y', 'R', '2') /* 16 BGBG.. GRGR.. */ 325#define V4L2_PIX_FMT_SBGGR16 v4l2_fourcc('B', 'Y', 'R', '2') /* 16 BGBG.. GRGR.. */
319 326
320/* compressed formats */ 327/* compressed formats */
diff --git a/include/linux/wlp.h b/include/linux/wlp.h
new file mode 100644
index 000000000000..033545e145c7
--- /dev/null
+++ b/include/linux/wlp.h
@@ -0,0 +1,735 @@
1/*
2 * WiMedia Logical Link Control Protocol (WLP)
3 *
4 * Copyright (C) 2005-2006 Intel Corporation
5 * Reinette Chatre <reinette.chatre@intel.com>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 * 02110-1301, USA.
20 *
21 *
22 * FIXME: docs
23 *
24 * - Does not (yet) include support for WLP control frames
25 * WLP Draft 0.99 [6.5].
26 *
27 * A visual representation of the data structures.
28 *
29 * wssidB wssidB
30 * ^ ^
31 * | |
32 * wssidA wssidA
33 * wlp interface { ^ ^
34 * ... | |
35 * ... ... wssid wssid ...
36 * wlp --- ... | |
37 * }; neighbors --> neighbA --> neighbB
38 * ...
39 * wss
40 * ...
41 * eda cache --> neighborA --> neighborB --> neighborC ...
42 */
43
44#ifndef __LINUX__WLP_H_
45#define __LINUX__WLP_H_
46
47#include <linux/netdevice.h>
48#include <linux/skbuff.h>
49#include <linux/list.h>
50#include <linux/uwb.h>
51
52/**
53 * WLP Protocol ID
54 * WLP Draft 0.99 [6.2]
55 *
56 * The MUX header for all WLP frames
57 */
58#define WLP_PROTOCOL_ID 0x0100
59
60/**
61 * WLP Version
62 * WLP version placed in the association frames (WLP 0.99 [6.6])
63 */
64#define WLP_VERSION 0x10
65
66/**
67 * Bytes needed to print UUID as string
68 */
69#define WLP_WSS_UUID_STRSIZE 48
70
71/**
72 * Bytes needed to print nonce as string
73 */
74#define WLP_WSS_NONCE_STRSIZE 48
75
76
77/**
78 * Size used for WLP name size
79 *
80 * The WSS name is set to 65 bytes, 1 byte larger than the maximum
81 * allowed by the WLP spec. This is to have a null terminated string
82 * for display to the user. A maximum of 64 bytes will still be used
83 * when placing the WSS name field in association frames.
84 */
85#define WLP_WSS_NAME_SIZE 65
86
87/**
88 * Number of bytes added by WLP to data frame
89 *
90 * A data frame transmitted from a host will be placed in a Standard or
91 * Abbreviated WLP frame. These have an extra 4 bytes of header (struct
92 * wlp_frame_std_abbrv_hdr).
93 * When the stack sends this data frame for transmission it needs to ensure
94 * there is enough headroom for this header.
95 */
96#define WLP_DATA_HLEN 4
97
98/**
99 * State of device regarding WLP Service Set
100 *
101 * WLP_WSS_STATE_NONE: the host does not participate in any WSS
102 * WLP_WSS_STATE_PART_ENROLLED: used as part of the enrollment sequence
103 * ("Partial Enroll"). This state is used to
104 * indicate the first part of enrollment that is
105 * unsecure. If the WSS is unsecure then the
106 * state will promptly go to WLP_WSS_STATE_ENROLLED,
107 * if the WSS is secure then the enrollment
108 * procedure is a few more steps before we are
109 * enrolled.
110 * WLP_WSS_STATE_ENROLLED: the host is enrolled in a WSS
111 * WLP_WSS_STATE_ACTIVE: WSS is activated
112 * WLP_WSS_STATE_CONNECTED: host is connected to neighbor in WSS
113 *
114 */
enum wlp_wss_state {
	WLP_WSS_STATE_NONE		= 0,	/* not participating in any WSS */
	WLP_WSS_STATE_PART_ENROLLED	= 1,	/* first (unsecured) enrollment step done */
	WLP_WSS_STATE_ENROLLED		= 2,	/* enrolled in a WSS */
	WLP_WSS_STATE_ACTIVE		= 3,	/* WSS activated */
	WLP_WSS_STATE_CONNECTED		= 4,	/* connected to a neighbor in the WSS */
};
122
/*
 * WSS secure status: one if the WSS is secure, zero if it is not.
 * WLP 0.99 Table 6.
 */
enum wlp_wss_sec_status {
	WLP_WSS_UNSECURE	= 0,
	WLP_WSS_SECURE		= 1,
};

/*
 * WLP frame type.
 * WLP Draft 0.99 [6.2 Table 1]
 */
enum wlp_frame_type {
	WLP_FRAME_STANDARD	= 0,
	WLP_FRAME_ABBREVIATED	= 1,
	WLP_FRAME_CONTROL	= 2,
	WLP_FRAME_ASSOCIATION	= 3,
};
144
/*
 * WLP association message type.
 * WLP Draft 0.99 [6.6.1.2 Table 8]
 */
enum wlp_assoc_type {
	WLP_ASSOC_D1	= 2,
	WLP_ASSOC_D2	= 3,
	WLP_ASSOC_M1	= 4,
	WLP_ASSOC_M2	= 5,
	WLP_ASSOC_M3	= 7,
	WLP_ASSOC_M4	= 8,
	WLP_ASSOC_M5	= 9,
	WLP_ASSOC_M6	= 10,
	WLP_ASSOC_M7	= 11,
	WLP_ASSOC_M8	= 12,
	WLP_ASSOC_F0	= 14,
	WLP_ASSOC_E1	= 32,
	WLP_ASSOC_E2	= 33,
	WLP_ASSOC_C1	= 34,
	WLP_ASSOC_C2	= 35,
	WLP_ASSOC_C3	= 36,
	WLP_ASSOC_C4	= 37,
};
168
/*
 * WLP attribute type.
 * WLP Draft 0.99 [6.6.1 Table 6]
 */
enum wlp_attr_type {
	WLP_ATTR_AUTH		= 0x1005,	/* Authenticator */
	WLP_ATTR_DEV_NAME	= 0x1011,	/* Device Name */
	WLP_ATTR_DEV_PWD_ID	= 0x1012,	/* Device Password ID */
	WLP_ATTR_E_HASH1	= 0x1014,	/* E-Hash1 */
	WLP_ATTR_E_HASH2	= 0x1015,	/* E-Hash2 */
	WLP_ATTR_E_SNONCE1	= 0x1016,	/* E-SNonce1 */
	WLP_ATTR_E_SNONCE2	= 0x1017,	/* E-SNonce2 */
	WLP_ATTR_ENCR_SET	= 0x1018,	/* Encrypted Settings */
	WLP_ATTR_ENRL_NONCE	= 0x101A,	/* Enrollee Nonce */
	WLP_ATTR_KEYWRAP_AUTH	= 0x101E,	/* Key Wrap Authenticator */
	WLP_ATTR_MANUF		= 0x1021,	/* Manufacturer */
	WLP_ATTR_MSG_TYPE	= 0x1022,	/* Message Type */
	WLP_ATTR_MODEL_NAME	= 0x1023,	/* Model Name */
	WLP_ATTR_MODEL_NR	= 0x1024,	/* Model Number */
	WLP_ATTR_PUB_KEY	= 0x1032,	/* Public Key */
	WLP_ATTR_REG_NONCE	= 0x1039,	/* Registrar Nonce */
	WLP_ATTR_R_HASH1	= 0x103D,	/* R-Hash1 */
	WLP_ATTR_R_HASH2	= 0x103E,	/* R-Hash2 */
	WLP_ATTR_R_SNONCE1	= 0x103F,	/* R-SNonce1 */
	WLP_ATTR_R_SNONCE2	= 0x1040,	/* R-SNonce2 */
	WLP_ATTR_SERIAL		= 0x1042,	/* Serial number */
	WLP_ATTR_UUID_E		= 0x1047,	/* UUID-E */
	WLP_ATTR_UUID_R		= 0x1048,	/* UUID-R */
	WLP_ATTR_PRI_DEV_TYPE	= 0x1054,	/* Primary Device Type */
	WLP_ATTR_SEC_DEV_TYPE	= 0x1055,	/* Secondary Device Type */
	WLP_ATTR_PORT_DEV	= 0x1056,	/* Portable Device */
	WLP_ATTR_APP_EXT	= 0x1058,	/* Application Extension */
	WLP_ATTR_WLP_VER	= 0x2000,	/* WLP Version */
	WLP_ATTR_WSSID		= 0x2001,	/* WSSID */
	WLP_ATTR_WSS_NAME	= 0x2002,	/* WSS Name */
	WLP_ATTR_WSS_SEC_STAT	= 0x2003,	/* WSS Secure Status */
	WLP_ATTR_WSS_BCAST	= 0x2004,	/* WSS Broadcast Address */
	WLP_ATTR_WSS_M_KEY	= 0x2005,	/* WSS Master Key */
	WLP_ATTR_ACC_ENRL	= 0x2006,	/* Accepting Enrollment */
	WLP_ATTR_WSS_INFO	= 0x2007,	/* WSS Information */
	WLP_ATTR_WSS_SEL_MTHD	= 0x2008,	/* WSS Selection Method */
	WLP_ATTR_ASSC_MTHD_LIST	= 0x2009,	/* Association Methods List */
	WLP_ATTR_SEL_ASSC_MTHD	= 0x200A,	/* Selected Association Method */
	WLP_ATTR_ENRL_HASH_COMM	= 0x200B,	/* Enrollee Hash Commitment */
	WLP_ATTR_WSS_TAG	= 0x200C,	/* WSS Tag */
	WLP_ATTR_WSS_VIRT	= 0x200D,	/* WSS Virtual EUI-48 */
	WLP_ATTR_WLP_ASSC_ERR	= 0x200E,	/* WLP Association Error */
	WLP_ATTR_VNDR_EXT	= 0x200F,	/* Vendor Extension */
};
218
/*
 * Category ID of a primary/secondary device.
 * WLP Draft 0.99 [6.6.1.8 Table 12]
 */
enum wlp_dev_category_id {
	WLP_DEV_CAT_COMPUTER			= 1,
	WLP_DEV_CAT_INPUT			= 2,
	WLP_DEV_CAT_PRINT_SCAN_FAX_COPIER	= 3,
	WLP_DEV_CAT_CAMERA			= 4,
	WLP_DEV_CAT_STORAGE			= 5,
	WLP_DEV_CAT_INFRASTRUCTURE		= 6,
	WLP_DEV_CAT_DISPLAY			= 7,
	WLP_DEV_CAT_MULTIM			= 8,
	WLP_DEV_CAT_GAMING			= 9,
	WLP_DEV_CAT_TELEPHONE			= 10,
	WLP_DEV_CAT_OTHER			= 65535,
};

/*
 * WSS selection method.
 * WLP Draft 0.99 [6.6.1.6 Table 10]
 */
enum wlp_wss_sel_mthd {
	WLP_WSS_ENRL_SELECT	= 1,	/* Enrollee selects */
	WLP_WSS_REG_SELECT	= 2,	/* Registrar selects */
};
245
/*
 * WLP association error values.
 * WLP Draft 0.99 [6.6.1.5 Table 9]
 */
enum wlp_assc_error {
	WLP_ASSOC_ERROR_NONE		= 0,
	WLP_ASSOC_ERROR_AUTH		= 1,	/* Authenticator failure */
	WLP_ASSOC_ERROR_ROGUE		= 2,	/* Rogue activity suspected */
	WLP_ASSOC_ERROR_BUSY		= 3,	/* Device busy */
	WLP_ASSOC_ERROR_LOCK		= 4,	/* Setup locked */
	WLP_ASSOC_ERROR_NOT_READY	= 5,	/* Registrar not ready */
	WLP_ASSOC_ERROR_INV		= 6,	/* Invalid WSS selection */
	WLP_ASSOC_ERROR_MSG_TIME	= 7,	/* Message timeout */
	WLP_ASSOC_ERROR_ENR_TIME	= 8,	/* Enrollment session timeout */
	WLP_ASSOC_ERROR_PW		= 9,	/* Device password invalid */
	WLP_ASSOC_ERROR_VER		= 10,	/* Unsupported version */
	WLP_ASSOC_ERROR_INT		= 11,	/* Internal error */
	WLP_ASSOC_ERROR_UNDEF		= 12,	/* Undefined error */
	WLP_ASSOC_ERROR_NUM		= 13,	/* Numeric comparison failure */
	WLP_ASSOC_ERROR_WAIT		= 14,	/* Waiting for user input */
};

/*
 * WLP parameters.
 * WLP 0.99 [7.7]
 */
enum wlp_parameters {
	WLP_PER_MSG_TIMEOUT = 15,	/* seconds to wait for a response
					 * to an association message */
};
276
277/**
278 * WLP IE
279 *
280 * The WLP IE should be included in beacons by all devices.
281 *
282 * The driver can set only a few of the fields in this information element,
283 * most fields are managed by the device itself. When the driver needs to set
284 * a field it will only provide values for the fields of interest, the rest
285 * will be filled with zeroes. The fields of interest are:
286 *
287 * Element ID
288 * Length
289 * Capabilities (only to include WSSID Hash list length)
290 * WSSID Hash List fields
291 *
292 * WLP 0.99 [6.7]
293 *
294 * Only the fields that will be used are detailed in this structure, rest
295 * are not detailed or marked as "notused".
296 */
297struct wlp_ie {
298 struct uwb_ie_hdr hdr;
299 __le16 capabilities;
300 __le16 cycle_param;
301 __le16 acw_anchor_addr;
302 u8 wssid_hash_list[];
303} __attribute__((packed));
304
305static inline int wlp_ie_hash_length(struct wlp_ie *ie)
306{
307 return (le16_to_cpu(ie->capabilities) >> 12) & 0xf;
308}
309
310static inline void wlp_ie_set_hash_length(struct wlp_ie *ie, int hash_length)
311{
312 u16 caps = le16_to_cpu(ie->capabilities);
313 caps = (caps & ~(0xf << 12)) | (hash_length << 12);
314 ie->capabilities = cpu_to_le16(caps);
315}
316
317/**
318 * WLP nonce
319 * WLP Draft 0.99 [6.6.1 Table 6]
320 *
321 * A 128-bit random number often used (E-SNonce1, E-SNonce2, Enrollee
322 * Nonce, Registrar Nonce, R-SNonce1, R-SNonce2). It is passed to HW so
323 * it is packed.
324 */
325struct wlp_nonce {
326 u8 data[16];
327} __attribute__((packed));
328
329/**
330 * WLP UUID
331 * WLP Draft 0.99 [6.6.1 Table 6]
332 *
333 * Universally Unique Identifier (UUID) encoded as an octet string in the
334 * order the octets are shown in string representation in RFC4122. A UUID
335 * is often used (UUID-E, UUID-R, WSSID). It is passed to HW so it is packed.
336 */
337struct wlp_uuid {
338 u8 data[16];
339} __attribute__((packed));
340
341
342/**
343 * Primary and secondary device type attributes
344 * WLP Draft 0.99 [6.6.1.8]
345 */
346struct wlp_dev_type {
347 enum wlp_dev_category_id category:16;
348 u8 OUI[3];
349 u8 OUIsubdiv;
350 __le16 subID;
351} __attribute__((packed));
352
353/**
354 * WLP frame header
355 * WLP Draft 0.99 [6.2]
356 */
357struct wlp_frame_hdr {
358 __le16 mux_hdr; /* WLP_PROTOCOL_ID */
359 enum wlp_frame_type type:8;
360} __attribute__((packed));
361
362/**
363 * WLP attribute field header
364 * WLP Draft 0.99 [6.6.1]
365 *
366 * Header of each attribute found in an association frame
367 */
368struct wlp_attr_hdr {
369 __le16 type;
370 __le16 length;
371} __attribute__((packed));
372
373/**
374 * Device information commonly used together
375 *
376 * Each of these device information elements has a specified range in which it
377 * should fit (WLP 0.99 [Table 6]). This range provided in the spec does not
378 * include the termination null '\0' character (when used in the
379 * association protocol the attribute fields are accompanied
380 * with a "length" field so the full range from the spec can be used for
381 * the value). We thus allocate an extra byte to be able to store a string
382 * of max length with a terminating '\0'.
383 */
384struct wlp_device_info {
385 char name[33];
386 char model_name[33];
387 char manufacturer[65];
388 char model_nr[33];
389 char serial[33];
390 struct wlp_dev_type prim_dev_type;
391};
392
/*
 * Helper macros for declaring the many (43 total) WLP attribute
 * wrappers: an attribute header followed by either a single value or
 * a trailing array of values.
 */
#define wlp_attr(type, name)			\
struct wlp_attr_##name {			\
	struct wlp_attr_hdr hdr;		\
	type name;				\
} __attribute__((packed));

#define wlp_attr_array(type, name)		\
struct wlp_attr_##name {			\
	struct wlp_attr_hdr hdr;		\
	type name[];				\
} __attribute__((packed));
411
412/**
413 * WLP association attribute fields
414 * WLP Draft 0.99 [6.6.1 Table 6]
415 *
416 * Attributes appear in same order as the Table in the spec
417 * FIXME Does not define all attributes yet
418 */
419
420/* Device name: Friendly name of sending device */
421wlp_attr_array(u8, dev_name)
422
423/* Enrollee Nonce: Random number generated by enrollee for an enrollment
424 * session */
425wlp_attr(struct wlp_nonce, enonce)
426
427/* Manufacturer name: Name of manufacturer of the sending device */
428wlp_attr_array(u8, manufacturer)
429
430/* WLP Message Type */
431wlp_attr(u8, msg_type)
432
433/* WLP Model name: Model name of sending device */
434wlp_attr_array(u8, model_name)
435
436/* WLP Model number: Model number of sending device */
437wlp_attr_array(u8, model_nr)
438
439/* Registrar Nonce: Random number generated by registrar for an enrollment
440 * session */
441wlp_attr(struct wlp_nonce, rnonce)
442
443/* Serial number of device */
444wlp_attr_array(u8, serial)
445
446/* UUID of enrollee */
447wlp_attr(struct wlp_uuid, uuid_e)
448
449/* UUID of registrar */
450wlp_attr(struct wlp_uuid, uuid_r)
451
452/* WLP Primary device type */
453wlp_attr(struct wlp_dev_type, prim_dev_type)
454
455/* WLP Secondary device type */
456wlp_attr(struct wlp_dev_type, sec_dev_type)
457
458/* WLP protocol version */
459wlp_attr(u8, version)
460
461/* WLP service set identifier */
462wlp_attr(struct wlp_uuid, wssid)
463
464/* WLP WSS name */
465wlp_attr_array(u8, wss_name)
466
467/* WLP WSS Secure Status */
468wlp_attr(u8, wss_sec_status)
469
470/* WSS Broadcast Address */
471wlp_attr(struct uwb_mac_addr, wss_bcast)
472
473/* WLP Accepting Enrollment */
474wlp_attr(u8, accept_enrl)
475
476/**
477 * WSS information attributes
478 * WLP Draft 0.99 [6.6.3 Table 15]
479 */
480struct wlp_wss_info {
481 struct wlp_attr_wssid wssid;
482 struct wlp_attr_wss_name name;
483 struct wlp_attr_accept_enrl accept;
484 struct wlp_attr_wss_sec_status sec_stat;
485 struct wlp_attr_wss_bcast bcast;
486} __attribute__((packed));
487
488/* WLP WSS Information */
489wlp_attr_array(struct wlp_wss_info, wss_info)
490
491/* WLP WSS Selection method */
492wlp_attr(u8, wss_sel_mthd)
493
494/* WLP WSS tag */
495wlp_attr(u8, wss_tag)
496
497/* WSS Virtual Address */
498wlp_attr(struct uwb_mac_addr, wss_virt)
499
500/* WLP association error */
501wlp_attr(u8, wlp_assc_err)
502
503/**
504 * WLP standard and abbreviated frames
505 *
506 * WLP Draft 0.99 [6.3] and [6.4]
507 *
508 * The difference between the WLP standard frame and the WLP
509 * abbreviated frame is that the standard frame includes the src
510 * and dest addresses from the Ethernet header, the abbreviated frame does
511 * not.
512 * The src/dest (as well as the type/length and client data) are already
513 * defined as part of the Ethernet header, we do not do this here.
514 * From this perspective the standard and abbreviated frames appear the
515 * same - they will be treated differently though.
516 *
517 * The size of this header is also captured in WLP_DATA_HLEN to enable
518 * interfaces to prepare their headroom.
519 */
520struct wlp_frame_std_abbrv_hdr {
521 struct wlp_frame_hdr hdr;
522 u8 tag;
523} __attribute__((packed));
524
525/**
526 * WLP association frames
527 *
528 * WLP Draft 0.99 [6.6]
529 */
530struct wlp_frame_assoc {
531 struct wlp_frame_hdr hdr;
532 enum wlp_assoc_type type:8;
533 struct wlp_attr_version version;
534 struct wlp_attr_msg_type msg_type;
535 u8 attr[];
536} __attribute__((packed));
537
538/* Ethernet to dev address mapping */
539struct wlp_eda {
540 spinlock_t lock;
541 struct list_head cache; /* Eth<->Dev Addr cache */
542};
543
544/**
545 * WSS information temporary storage
546 *
547 * This information is only stored temporarily during discovery. It should
548 * not be stored unless the device is enrolled in the advertised WSS. This
549 * is done mainly because we follow the letter of the spec in this regard.
550 * See WLP 0.99 [7.2.3].
551 * When the device does become enrolled in a WSS the WSS information will
552 * be stored as part of the more comprehensive struct wlp_wss.
553 */
554struct wlp_wss_tmp_info {
555 char name[WLP_WSS_NAME_SIZE];
556 u8 accept_enroll;
557 u8 sec_status;
558 struct uwb_mac_addr bcast;
559};
560
561struct wlp_wssid_e {
562 struct list_head node;
563 struct wlp_uuid wssid;
564 struct wlp_wss_tmp_info *info;
565};
566
567/**
568 * A cache entry of WLP neighborhood
569 *
570 * @node: head of list is wlp->neighbors
571 * @wssid: list of wssids of this neighbor, element is wlp_wssid_e
572 * @info: temporary storage for information learned during discovery. This
573 * storage is used together with the wssid_e temporary storage
574 * during discovery.
575 */
576struct wlp_neighbor_e {
577 struct list_head node;
578 struct wlp_uuid uuid;
579 struct uwb_dev *uwb_dev;
580 struct list_head wssid; /* Elements are wlp_wssid_e */
581 struct wlp_device_info *info;
582};
583
584struct wlp;
585/**
586 * Information for an association session in progress.
587 *
588 * @exp_message: The type of the expected message. Both this message and a
589 * F0 message (which can be sent in response to any
590 * association frame) will be accepted as a valid message for
591 * this session.
592 * @cb: The function that will be called upon receipt of this
593 * message.
594 * @cb_priv: Private data of callback
595 * @data: Data used in association process (always a sk_buff?)
596 * @neighbor: Address of neighbor with which association session is in
597 * progress.
598 */
599struct wlp_session {
600 enum wlp_assoc_type exp_message;
601 void (*cb)(struct wlp *);
602 void *cb_priv;
603 void *data;
604 struct uwb_dev_addr neighbor_addr;
605};
606
607/**
608 * WLP Service Set
609 *
610 * @mutex: used to protect entire WSS structure.
611 *
612 * @name: The WSS name is set to 65 bytes, 1 byte larger than the maximum
613 * allowed by the WLP spec. This is to have a null terminated string
614 * for display to the user. A maximum of 64 bytes will still be used
615 * when placing the WSS name field in association frames.
616 *
617 * @accept_enroll: Accepting enrollment: Set to one if registrar is
618 * accepting enrollment in WSS, or zero otherwise.
619 *
620 * Global and local information for each WSS in which we are enrolled.
621 * WLP 0.99 Section 7.2.1 and Section 7.2.2
622 */
623struct wlp_wss {
624 struct mutex mutex;
625 struct kobject kobj;
626 /* Global properties. */
627 struct wlp_uuid wssid;
628 u8 hash;
629 char name[WLP_WSS_NAME_SIZE];
630 struct uwb_mac_addr bcast;
631 u8 secure_status:1;
632 u8 master_key[16];
633 /* Local properties. */
634 u8 tag;
635 struct uwb_mac_addr virtual_addr;
636 /* Extra */
637 u8 accept_enroll:1;
638 enum wlp_wss_state state;
639};
640
641/**
642 * WLP main structure
643 * @mutex: protect changes to WLP structure. We only allow changes to the
644 * uuid, so currently this mutex only protects this field.
645 */
646struct wlp {
647 struct mutex mutex;
648 struct uwb_rc *rc; /* UWB radio controller */
649 struct uwb_pal pal;
650 struct wlp_eda eda;
651 struct wlp_uuid uuid;
652 struct wlp_session *session;
653 struct wlp_wss wss;
654 struct mutex nbmutex; /* Neighbor mutex protects neighbors list */
655 struct list_head neighbors; /* Elements are wlp_neighbor_e */
656 struct uwb_notifs_handler uwb_notifs_handler;
657 struct wlp_device_info *dev_info;
658 void (*fill_device_info)(struct wlp *wlp, struct wlp_device_info *info);
659 int (*xmit_frame)(struct wlp *, struct sk_buff *,
660 struct uwb_dev_addr *);
661 void (*stop_queue)(struct wlp *);
662 void (*start_queue)(struct wlp *);
663};
664
665/* sysfs */
666
667
668struct wlp_wss_attribute {
669 struct attribute attr;
670 ssize_t (*show)(struct wlp_wss *wss, char *buf);
671 ssize_t (*store)(struct wlp_wss *wss, const char *buf, size_t count);
672};
673
674#define WSS_ATTR(_name, _mode, _show, _store) \
675static struct wlp_wss_attribute wss_attr_##_name = __ATTR(_name, _mode, \
676 _show, _store)
677
678extern int wlp_setup(struct wlp *, struct uwb_rc *);
679extern void wlp_remove(struct wlp *);
680extern ssize_t wlp_neighborhood_show(struct wlp *, char *);
681extern int wlp_wss_setup(struct net_device *, struct wlp_wss *);
682extern void wlp_wss_remove(struct wlp_wss *);
683extern ssize_t wlp_wss_activate_show(struct wlp_wss *, char *);
684extern ssize_t wlp_wss_activate_store(struct wlp_wss *, const char *, size_t);
685extern ssize_t wlp_eda_show(struct wlp *, char *);
686extern ssize_t wlp_eda_store(struct wlp *, const char *, size_t);
687extern ssize_t wlp_uuid_show(struct wlp *, char *);
688extern ssize_t wlp_uuid_store(struct wlp *, const char *, size_t);
689extern ssize_t wlp_dev_name_show(struct wlp *, char *);
690extern ssize_t wlp_dev_name_store(struct wlp *, const char *, size_t);
691extern ssize_t wlp_dev_manufacturer_show(struct wlp *, char *);
692extern ssize_t wlp_dev_manufacturer_store(struct wlp *, const char *, size_t);
693extern ssize_t wlp_dev_model_name_show(struct wlp *, char *);
694extern ssize_t wlp_dev_model_name_store(struct wlp *, const char *, size_t);
695extern ssize_t wlp_dev_model_nr_show(struct wlp *, char *);
696extern ssize_t wlp_dev_model_nr_store(struct wlp *, const char *, size_t);
697extern ssize_t wlp_dev_serial_show(struct wlp *, char *);
698extern ssize_t wlp_dev_serial_store(struct wlp *, const char *, size_t);
699extern ssize_t wlp_dev_prim_category_show(struct wlp *, char *);
700extern ssize_t wlp_dev_prim_category_store(struct wlp *, const char *,
701 size_t);
702extern ssize_t wlp_dev_prim_OUI_show(struct wlp *, char *);
703extern ssize_t wlp_dev_prim_OUI_store(struct wlp *, const char *, size_t);
704extern ssize_t wlp_dev_prim_OUI_sub_show(struct wlp *, char *);
705extern ssize_t wlp_dev_prim_OUI_sub_store(struct wlp *, const char *,
706 size_t);
707extern ssize_t wlp_dev_prim_subcat_show(struct wlp *, char *);
708extern ssize_t wlp_dev_prim_subcat_store(struct wlp *, const char *,
709 size_t);
710extern int wlp_receive_frame(struct device *, struct wlp *, struct sk_buff *,
711 struct uwb_dev_addr *);
712extern int wlp_prepare_tx_frame(struct device *, struct wlp *,
713 struct sk_buff *, struct uwb_dev_addr *);
714void wlp_reset_all(struct wlp *wlp);
715
716/**
717 * Initialize WSS
718 */
719static inline
720void wlp_wss_init(struct wlp_wss *wss)
721{
722 mutex_init(&wss->mutex);
723}
724
725static inline
726void wlp_init(struct wlp *wlp)
727{
728 INIT_LIST_HEAD(&wlp->neighbors);
729 mutex_init(&wlp->mutex);
730 mutex_init(&wlp->nbmutex);
731 wlp_wss_init(&wlp->wss);
732}
733
734
735#endif /* #ifndef __LINUX__WLP_H_ */
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 5c158c477ac7..89a5a1231ffb 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -149,11 +149,11 @@ struct execute_work {
149 149
150extern struct workqueue_struct * 150extern struct workqueue_struct *
151__create_workqueue_key(const char *name, int singlethread, 151__create_workqueue_key(const char *name, int singlethread,
152 int freezeable, struct lock_class_key *key, 152 int freezeable, int rt, struct lock_class_key *key,
153 const char *lock_name); 153 const char *lock_name);
154 154
155#ifdef CONFIG_LOCKDEP 155#ifdef CONFIG_LOCKDEP
156#define __create_workqueue(name, singlethread, freezeable) \ 156#define __create_workqueue(name, singlethread, freezeable, rt) \
157({ \ 157({ \
158 static struct lock_class_key __key; \ 158 static struct lock_class_key __key; \
159 const char *__lock_name; \ 159 const char *__lock_name; \
@@ -164,17 +164,19 @@ __create_workqueue_key(const char *name, int singlethread,
164 __lock_name = #name; \ 164 __lock_name = #name; \
165 \ 165 \
166 __create_workqueue_key((name), (singlethread), \ 166 __create_workqueue_key((name), (singlethread), \
167 (freezeable), &__key, \ 167 (freezeable), (rt), &__key, \
168 __lock_name); \ 168 __lock_name); \
169}) 169})
170#else 170#else
171#define __create_workqueue(name, singlethread, freezeable) \ 171#define __create_workqueue(name, singlethread, freezeable, rt) \
172 __create_workqueue_key((name), (singlethread), (freezeable), NULL, NULL) 172 __create_workqueue_key((name), (singlethread), (freezeable), (rt), \
173 NULL, NULL)
173#endif 174#endif
174 175
175#define create_workqueue(name) __create_workqueue((name), 0, 0) 176#define create_workqueue(name) __create_workqueue((name), 0, 0, 0)
176#define create_freezeable_workqueue(name) __create_workqueue((name), 1, 1) 177#define create_rt_workqueue(name) __create_workqueue((name), 0, 0, 1)
177#define create_singlethread_workqueue(name) __create_workqueue((name), 1, 0) 178#define create_freezeable_workqueue(name) __create_workqueue((name), 1, 1, 0)
179#define create_singlethread_workqueue(name) __create_workqueue((name), 1, 0, 0)
178 180
179extern void destroy_workqueue(struct workqueue_struct *wq); 181extern void destroy_workqueue(struct workqueue_struct *wq);
180 182
diff --git a/include/media/v4l2-int-device.h b/include/media/v4l2-int-device.h
index c8b80e0f0651..9c2df41dbf92 100644
--- a/include/media/v4l2-int-device.h
+++ b/include/media/v4l2-int-device.h
@@ -84,6 +84,8 @@ struct v4l2_int_device {
84 void *priv; 84 void *priv;
85}; 85};
86 86
87void v4l2_int_device_try_attach_all(void);
88
87int v4l2_int_device_register(struct v4l2_int_device *d); 89int v4l2_int_device_register(struct v4l2_int_device *d);
88void v4l2_int_device_unregister(struct v4l2_int_device *d); 90void v4l2_int_device_unregister(struct v4l2_int_device *d);
89 91
@@ -96,6 +98,12 @@ int v4l2_int_ioctl_1(struct v4l2_int_device *d, int cmd, void *arg);
96 * 98 *
97 */ 99 */
98 100
101enum v4l2_power {
102 V4L2_POWER_OFF = 0,
103 V4L2_POWER_ON,
104 V4L2_POWER_STANDBY,
105};
106
99/* Slave interface type. */ 107/* Slave interface type. */
100enum v4l2_if_type { 108enum v4l2_if_type {
101 /* 109 /*
@@ -170,6 +178,9 @@ enum v4l2_int_ioctl_num {
170 vidioc_int_queryctrl_num, 178 vidioc_int_queryctrl_num,
171 vidioc_int_g_ctrl_num, 179 vidioc_int_g_ctrl_num,
172 vidioc_int_s_ctrl_num, 180 vidioc_int_s_ctrl_num,
181 vidioc_int_cropcap_num,
182 vidioc_int_g_crop_num,
183 vidioc_int_s_crop_num,
173 vidioc_int_g_parm_num, 184 vidioc_int_g_parm_num,
174 vidioc_int_s_parm_num, 185 vidioc_int_s_parm_num,
175 186
@@ -182,12 +193,19 @@ enum v4l2_int_ioctl_num {
182 vidioc_int_dev_init_num = 1000, 193 vidioc_int_dev_init_num = 1000,
183 /* Delinitialise the device at slave detach. */ 194 /* Delinitialise the device at slave detach. */
184 vidioc_int_dev_exit_num, 195 vidioc_int_dev_exit_num,
185 /* Set device power state: 0 is off, non-zero is on. */ 196 /* Set device power state. */
186 vidioc_int_s_power_num, 197 vidioc_int_s_power_num,
198 /*
199 * Get slave private data, e.g. platform-specific slave
200 * configuration used by the master.
201 */
202 vidioc_int_g_priv_num,
187 /* Get slave interface parameters. */ 203 /* Get slave interface parameters. */
188 vidioc_int_g_ifparm_num, 204 vidioc_int_g_ifparm_num,
189 /* Does the slave need to be reset after VIDIOC_DQBUF? */ 205 /* Does the slave need to be reset after VIDIOC_DQBUF? */
190 vidioc_int_g_needs_reset_num, 206 vidioc_int_g_needs_reset_num,
207 vidioc_int_enum_framesizes_num,
208 vidioc_int_enum_frameintervals_num,
191 209
192 /* 210 /*
193 * 211 *
@@ -261,14 +279,20 @@ V4L2_INT_WRAPPER_1(try_fmt_cap, struct v4l2_format, *);
261V4L2_INT_WRAPPER_1(queryctrl, struct v4l2_queryctrl, *); 279V4L2_INT_WRAPPER_1(queryctrl, struct v4l2_queryctrl, *);
262V4L2_INT_WRAPPER_1(g_ctrl, struct v4l2_control, *); 280V4L2_INT_WRAPPER_1(g_ctrl, struct v4l2_control, *);
263V4L2_INT_WRAPPER_1(s_ctrl, struct v4l2_control, *); 281V4L2_INT_WRAPPER_1(s_ctrl, struct v4l2_control, *);
282V4L2_INT_WRAPPER_1(cropcap, struct v4l2_cropcap, *);
283V4L2_INT_WRAPPER_1(g_crop, struct v4l2_crop, *);
284V4L2_INT_WRAPPER_1(s_crop, struct v4l2_crop, *);
264V4L2_INT_WRAPPER_1(g_parm, struct v4l2_streamparm, *); 285V4L2_INT_WRAPPER_1(g_parm, struct v4l2_streamparm, *);
265V4L2_INT_WRAPPER_1(s_parm, struct v4l2_streamparm, *); 286V4L2_INT_WRAPPER_1(s_parm, struct v4l2_streamparm, *);
266 287
267V4L2_INT_WRAPPER_0(dev_init); 288V4L2_INT_WRAPPER_0(dev_init);
268V4L2_INT_WRAPPER_0(dev_exit); 289V4L2_INT_WRAPPER_0(dev_exit);
269V4L2_INT_WRAPPER_1(s_power, int, ); 290V4L2_INT_WRAPPER_1(s_power, enum v4l2_power, );
291V4L2_INT_WRAPPER_1(g_priv, void, *);
270V4L2_INT_WRAPPER_1(g_ifparm, struct v4l2_ifparm, *); 292V4L2_INT_WRAPPER_1(g_ifparm, struct v4l2_ifparm, *);
271V4L2_INT_WRAPPER_1(g_needs_reset, void, *); 293V4L2_INT_WRAPPER_1(g_needs_reset, void, *);
294V4L2_INT_WRAPPER_1(enum_framesizes, struct v4l2_frmsizeenum, *);
295V4L2_INT_WRAPPER_1(enum_frameintervals, struct v4l2_frmivalenum, *);
272 296
273V4L2_INT_WRAPPER_0(reset); 297V4L2_INT_WRAPPER_0(reset);
274V4L2_INT_WRAPPER_0(init); 298V4L2_INT_WRAPPER_0(init);
diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
index 0bef03add796..e6ba25b3d7c8 100644
--- a/include/media/v4l2-ioctl.h
+++ b/include/media/v4l2-ioctl.h
@@ -271,26 +271,38 @@ extern const char *v4l2_field_names[];
271extern const char *v4l2_type_names[]; 271extern const char *v4l2_type_names[];
272 272
273/* Compatibility layer interface -- v4l1-compat module */ 273/* Compatibility layer interface -- v4l1-compat module */
274typedef int (*v4l2_kioctl)(struct inode *inode, struct file *file, 274typedef int (*v4l2_kioctl)(struct file *file,
275 unsigned int cmd, void *arg); 275 unsigned int cmd, void *arg);
276#ifdef CONFIG_VIDEO_V4L1_COMPAT 276#ifdef CONFIG_VIDEO_V4L1_COMPAT
277int v4l_compat_translate_ioctl(struct inode *inode, struct file *file, 277int v4l_compat_translate_ioctl(struct file *file,
278 int cmd, void *arg, v4l2_kioctl driver_ioctl); 278 int cmd, void *arg, v4l2_kioctl driver_ioctl);
279#else 279#else
280#define v4l_compat_translate_ioctl(inode, file, cmd, arg, ioctl) (-EINVAL) 280#define v4l_compat_translate_ioctl(file, cmd, arg, ioctl) (-EINVAL)
281#endif 281#endif
282 282
283/* 32 Bits compatibility layer for 64 bits processors */ 283/* 32 Bits compatibility layer for 64 bits processors */
284extern long v4l_compat_ioctl32(struct file *file, unsigned int cmd, 284extern long v4l_compat_ioctl32(struct file *file, unsigned int cmd,
285 unsigned long arg); 285 unsigned long arg);
286 286
287extern int video_ioctl2(struct inode *inode, struct file *file,
288 unsigned int cmd, unsigned long arg);
289
290/* Include support for obsoleted stuff */ 287/* Include support for obsoleted stuff */
291extern int video_usercopy(struct inode *inode, struct file *file, 288extern int video_usercopy(struct inode *inode, struct file *file,
292 unsigned int cmd, unsigned long arg, 289 unsigned int cmd, unsigned long arg,
293 int (*func)(struct inode *inode, struct file *file, 290 int (*func)(struct inode *inode, struct file *file,
294 unsigned int cmd, void *arg)); 291 unsigned int cmd, void *arg));
295 292
293/* Standard handlers for V4L ioctl's */
294
295/* This prototype is used on fops.unlocked_ioctl */
296extern int __video_ioctl2(struct file *file,
297 unsigned int cmd, unsigned long arg);
298
299/* This prototype is used on fops.ioctl
300 * Since fops.ioctl enables Kernel Big Lock, it is preferred
301 * to use __video_ioctl2 instead.
302 * It should be noticed that there's no lock code inside
303 * video_ioctl2().
304 */
305extern int video_ioctl2(struct inode *inode, struct file *file,
306 unsigned int cmd, unsigned long arg);
307
296#endif /* _V4L2_IOCTL_H */ 308#endif /* _V4L2_IOCTL_H */
diff --git a/include/media/videobuf-dvb.h b/include/media/videobuf-dvb.h
index 80471c2b6343..6ba4f1271d23 100644
--- a/include/media/videobuf-dvb.h
+++ b/include/media/videobuf-dvb.h
@@ -47,6 +47,7 @@ int videobuf_dvb_register_bus(struct videobuf_dvb_frontends *f,
47void videobuf_dvb_unregister_bus(struct videobuf_dvb_frontends *f); 47void videobuf_dvb_unregister_bus(struct videobuf_dvb_frontends *f);
48 48
49struct videobuf_dvb_frontend * videobuf_dvb_alloc_frontend(struct videobuf_dvb_frontends *f, int id); 49struct videobuf_dvb_frontend * videobuf_dvb_alloc_frontend(struct videobuf_dvb_frontends *f, int id);
50void videobuf_dvb_dealloc_frontends(struct videobuf_dvb_frontends *f);
50 51
51struct videobuf_dvb_frontend * videobuf_dvb_get_frontend(struct videobuf_dvb_frontends *f, int id); 52struct videobuf_dvb_frontend * videobuf_dvb_get_frontend(struct videobuf_dvb_frontends *f, int id);
52int videobuf_dvb_find_frontend(struct videobuf_dvb_frontends *f, struct dvb_frontend *p); 53int videobuf_dvb_find_frontend(struct videobuf_dvb_frontends *f, struct dvb_frontend *p);
diff --git a/include/net/9p/9p.h b/include/net/9p/9p.h
index d2c60c73619d..b77c1478c99f 100644
--- a/include/net/9p/9p.h
+++ b/include/net/9p/9p.h
@@ -56,9 +56,9 @@ enum p9_debug_flags {
56 P9_DEBUG_PKT = (1<<10), 56 P9_DEBUG_PKT = (1<<10),
57}; 57};
58 58
59#ifdef CONFIG_NET_9P_DEBUG
59extern unsigned int p9_debug_level; 60extern unsigned int p9_debug_level;
60 61
61#ifdef CONFIG_NET_9P_DEBUG
62#define P9_DPRINTK(level, format, arg...) \ 62#define P9_DPRINTK(level, format, arg...) \
63do { \ 63do { \
64 if ((p9_debug_level & level) == level) {\ 64 if ((p9_debug_level & level) == level) {\
diff --git a/include/net/9p/client.h b/include/net/9p/client.h
index 1f17f3d93727..4012e07162e5 100644
--- a/include/net/9p/client.h
+++ b/include/net/9p/client.h
@@ -182,6 +182,7 @@ struct p9_fid {
182 struct list_head dlist; /* list of all fids attached to a dentry */ 182 struct list_head dlist; /* list of all fids attached to a dentry */
183}; 183};
184 184
185int p9_client_version(struct p9_client *);
185struct p9_client *p9_client_create(const char *dev_name, char *options); 186struct p9_client *p9_client_create(const char *dev_name, char *options);
186void p9_client_destroy(struct p9_client *clnt); 187void p9_client_destroy(struct p9_client *clnt);
187void p9_client_disconnect(struct p9_client *clnt); 188void p9_client_disconnect(struct p9_client *clnt);
@@ -206,6 +207,7 @@ int p9_client_wstat(struct p9_fid *fid, struct p9_wstat *wst);
206struct p9_req_t *p9_tag_lookup(struct p9_client *, u16); 207struct p9_req_t *p9_tag_lookup(struct p9_client *, u16);
207void p9_client_cb(struct p9_client *c, struct p9_req_t *req); 208void p9_client_cb(struct p9_client *c, struct p9_req_t *req);
208 209
210int p9_parse_header(struct p9_fcall *, int32_t *, int8_t *, int16_t *, int);
209int p9stat_read(char *, int, struct p9_wstat *, int); 211int p9stat_read(char *, int, struct p9_wstat *, int);
210void p9stat_free(struct p9_wstat *); 212void p9stat_free(struct p9_wstat *);
211 213
diff --git a/init/Kconfig b/init/Kconfig
index 113c74c07da4..44e9208f9c78 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -739,7 +739,8 @@ config VM_EVENT_COUNTERS
739 739
740config PCI_QUIRKS 740config PCI_QUIRKS
741 default y 741 default y
742 bool "Enable PCI quirk workarounds" if EMBEDDED && PCI 742 bool "Enable PCI quirk workarounds" if EMBEDDED
743 depends on PCI
743 help 744 help
744 This enables workarounds for various PCI chipset 745 This enables workarounds for various PCI chipset
745 bugs/quirks. Disable this only if your target machine is 746 bugs/quirks. Disable this only if your target machine is
diff --git a/init/main.c b/init/main.c
index 3e17a3bafe60..b038fa142041 100644
--- a/init/main.c
+++ b/init/main.c
@@ -52,6 +52,7 @@
52#include <linux/key.h> 52#include <linux/key.h>
53#include <linux/unwind.h> 53#include <linux/unwind.h>
54#include <linux/buffer_head.h> 54#include <linux/buffer_head.h>
55#include <linux/page_cgroup.h>
55#include <linux/debug_locks.h> 56#include <linux/debug_locks.h>
56#include <linux/debugobjects.h> 57#include <linux/debugobjects.h>
57#include <linux/lockdep.h> 58#include <linux/lockdep.h>
@@ -647,6 +648,7 @@ asmlinkage void __init start_kernel(void)
647 vmalloc_init(); 648 vmalloc_init();
648 vfs_caches_init_early(); 649 vfs_caches_init_early();
649 cpuset_init_early(); 650 cpuset_init_early();
651 page_cgroup_init();
650 mem_init(); 652 mem_init();
651 enable_debug_pagealloc(); 653 enable_debug_pagealloc();
652 cpu_hotplug_init(); 654 cpu_hotplug_init();
@@ -697,13 +699,7 @@ asmlinkage void __init start_kernel(void)
697} 699}
698 700
699static int initcall_debug; 701static int initcall_debug;
700 702core_param(initcall_debug, initcall_debug, bool, 0644);
701static int __init initcall_debug_setup(char *str)
702{
703 initcall_debug = 1;
704 return 1;
705}
706__setup("initcall_debug", initcall_debug_setup);
707 703
708int do_one_initcall(initcall_t fn) 704int do_one_initcall(initcall_t fn)
709{ 705{
@@ -773,8 +769,6 @@ static void __init do_initcalls(void)
773static void __init do_basic_setup(void) 769static void __init do_basic_setup(void)
774{ 770{
775 rcu_init_sched(); /* needed by module_init stage. */ 771 rcu_init_sched(); /* needed by module_init stage. */
776 /* drivers will send hotplug events */
777 init_workqueues();
778 usermodehelper_init(); 772 usermodehelper_init();
779 driver_init(); 773 driver_init();
780 init_irq_proc(); 774 init_irq_proc();
@@ -858,6 +852,8 @@ static int __init kernel_init(void * unused)
858 852
859 cad_pid = task_pid(current); 853 cad_pid = task_pid(current);
860 854
855 init_workqueues();
856
861 smp_prepare_cpus(setup_max_cpus); 857 smp_prepare_cpus(setup_max_cpus);
862 858
863 do_pre_smp_initcalls(); 859 do_pre_smp_initcalls();
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 4895fde4eb93..10b5092e9bfe 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -76,6 +76,7 @@ void dynamic_irq_cleanup(unsigned int irq)
76 desc->chip_data = NULL; 76 desc->chip_data = NULL;
77 desc->handle_irq = handle_bad_irq; 77 desc->handle_irq = handle_bad_irq;
78 desc->chip = &no_irq_chip; 78 desc->chip = &no_irq_chip;
79 desc->name = NULL;
79 spin_unlock_irqrestore(&desc->lock, flags); 80 spin_unlock_irqrestore(&desc->lock, flags);
80} 81}
81 82
@@ -127,7 +128,7 @@ int set_irq_type(unsigned int irq, unsigned int type)
127 return 0; 128 return 0;
128 129
129 spin_lock_irqsave(&desc->lock, flags); 130 spin_lock_irqsave(&desc->lock, flags);
130 ret = __irq_set_trigger(desc, irq, flags); 131 ret = __irq_set_trigger(desc, irq, type);
131 spin_unlock_irqrestore(&desc->lock, flags); 132 spin_unlock_irqrestore(&desc->lock, flags);
132 return ret; 133 return ret;
133} 134}
diff --git a/kernel/module.c b/kernel/module.c
index 0d8d21ee792c..c0f1826e2d9e 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -42,6 +42,7 @@
42#include <linux/string.h> 42#include <linux/string.h>
43#include <linux/mutex.h> 43#include <linux/mutex.h>
44#include <linux/unwind.h> 44#include <linux/unwind.h>
45#include <linux/rculist.h>
45#include <asm/uaccess.h> 46#include <asm/uaccess.h>
46#include <asm/cacheflush.h> 47#include <asm/cacheflush.h>
47#include <linux/license.h> 48#include <linux/license.h>
@@ -63,7 +64,7 @@
63#define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1)) 64#define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))
64 65
65/* List of modules, protected by module_mutex or preempt_disable 66/* List of modules, protected by module_mutex or preempt_disable
66 * (add/delete uses stop_machine). */ 67 * (delete uses stop_machine/add uses RCU list operations). */
67static DEFINE_MUTEX(module_mutex); 68static DEFINE_MUTEX(module_mutex);
68static LIST_HEAD(modules); 69static LIST_HEAD(modules);
69 70
@@ -132,6 +133,29 @@ static unsigned int find_sec(Elf_Ehdr *hdr,
132 return 0; 133 return 0;
133} 134}
134 135
136/* Find a module section, or NULL. */
137static void *section_addr(Elf_Ehdr *hdr, Elf_Shdr *shdrs,
138 const char *secstrings, const char *name)
139{
140 /* Section 0 has sh_addr 0. */
141 return (void *)shdrs[find_sec(hdr, shdrs, secstrings, name)].sh_addr;
142}
143
144/* Find a module section, or NULL. Fill in number of "objects" in section. */
145static void *section_objs(Elf_Ehdr *hdr,
146 Elf_Shdr *sechdrs,
147 const char *secstrings,
148 const char *name,
149 size_t object_size,
150 unsigned int *num)
151{
152 unsigned int sec = find_sec(hdr, sechdrs, secstrings, name);
153
154 /* Section 0 has sh_addr 0 and sh_size 0. */
155 *num = sechdrs[sec].sh_size / object_size;
156 return (void *)sechdrs[sec].sh_addr;
157}
158
135/* Provided by the linker */ 159/* Provided by the linker */
136extern const struct kernel_symbol __start___ksymtab[]; 160extern const struct kernel_symbol __start___ksymtab[];
137extern const struct kernel_symbol __stop___ksymtab[]; 161extern const struct kernel_symbol __stop___ksymtab[];
@@ -218,7 +242,7 @@ static bool each_symbol(bool (*fn)(const struct symsearch *arr,
218 if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data)) 242 if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data))
219 return true; 243 return true;
220 244
221 list_for_each_entry(mod, &modules, list) { 245 list_for_each_entry_rcu(mod, &modules, list) {
222 struct symsearch arr[] = { 246 struct symsearch arr[] = {
223 { mod->syms, mod->syms + mod->num_syms, mod->crcs, 247 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
224 NOT_GPL_ONLY, false }, 248 NOT_GPL_ONLY, false },
@@ -1394,17 +1418,6 @@ static void mod_kobject_remove(struct module *mod)
1394} 1418}
1395 1419
1396/* 1420/*
1397 * link the module with the whole machine is stopped with interrupts off
1398 * - this defends against kallsyms not taking locks
1399 */
1400static int __link_module(void *_mod)
1401{
1402 struct module *mod = _mod;
1403 list_add(&mod->list, &modules);
1404 return 0;
1405}
1406
1407/*
1408 * unlink the module with the whole machine is stopped with interrupts off 1421 * unlink the module with the whole machine is stopped with interrupts off
1409 * - this defends against kallsyms not taking locks 1422 * - this defends against kallsyms not taking locks
1410 */ 1423 */
@@ -1789,32 +1802,20 @@ static inline void add_kallsyms(struct module *mod,
1789} 1802}
1790#endif /* CONFIG_KALLSYMS */ 1803#endif /* CONFIG_KALLSYMS */
1791 1804
1792#ifdef CONFIG_DYNAMIC_PRINTK_DEBUG 1805static void dynamic_printk_setup(struct mod_debug *debug, unsigned int num)
1793static void dynamic_printk_setup(Elf_Shdr *sechdrs, unsigned int verboseindex)
1794{ 1806{
1795 struct mod_debug *debug_info; 1807#ifdef CONFIG_DYNAMIC_PRINTK_DEBUG
1796 unsigned long pos, end; 1808 unsigned int i;
1797 unsigned int num_verbose;
1798
1799 pos = sechdrs[verboseindex].sh_addr;
1800 num_verbose = sechdrs[verboseindex].sh_size /
1801 sizeof(struct mod_debug);
1802 end = pos + (num_verbose * sizeof(struct mod_debug));
1803 1809
1804 for (; pos < end; pos += sizeof(struct mod_debug)) { 1810 for (i = 0; i < num; i++) {
1805 debug_info = (struct mod_debug *)pos; 1811 register_dynamic_debug_module(debug[i].modname,
1806 register_dynamic_debug_module(debug_info->modname, 1812 debug[i].type,
1807 debug_info->type, debug_info->logical_modname, 1813 debug[i].logical_modname,
1808 debug_info->flag_names, debug_info->hash, 1814 debug[i].flag_names,
1809 debug_info->hash2); 1815 debug[i].hash, debug[i].hash2);
1810 } 1816 }
1811}
1812#else
1813static inline void dynamic_printk_setup(Elf_Shdr *sechdrs,
1814 unsigned int verboseindex)
1815{
1816}
1817#endif /* CONFIG_DYNAMIC_PRINTK_DEBUG */ 1817#endif /* CONFIG_DYNAMIC_PRINTK_DEBUG */
1818}
1818 1819
1819static void *module_alloc_update_bounds(unsigned long size) 1820static void *module_alloc_update_bounds(unsigned long size)
1820{ 1821{
@@ -1843,37 +1844,14 @@ static noinline struct module *load_module(void __user *umod,
1843 unsigned int i; 1844 unsigned int i;
1844 unsigned int symindex = 0; 1845 unsigned int symindex = 0;
1845 unsigned int strindex = 0; 1846 unsigned int strindex = 0;
1846 unsigned int setupindex; 1847 unsigned int modindex, versindex, infoindex, pcpuindex;
1847 unsigned int exindex;
1848 unsigned int exportindex;
1849 unsigned int modindex;
1850 unsigned int obsparmindex;
1851 unsigned int infoindex;
1852 unsigned int gplindex;
1853 unsigned int crcindex;
1854 unsigned int gplcrcindex;
1855 unsigned int versindex;
1856 unsigned int pcpuindex;
1857 unsigned int gplfutureindex;
1858 unsigned int gplfuturecrcindex;
1859 unsigned int unwindex = 0; 1848 unsigned int unwindex = 0;
1860#ifdef CONFIG_UNUSED_SYMBOLS 1849 unsigned int num_kp, num_mcount;
1861 unsigned int unusedindex; 1850 struct kernel_param *kp;
1862 unsigned int unusedcrcindex;
1863 unsigned int unusedgplindex;
1864 unsigned int unusedgplcrcindex;
1865#endif
1866 unsigned int markersindex;
1867 unsigned int markersstringsindex;
1868 unsigned int verboseindex;
1869 unsigned int tracepointsindex;
1870 unsigned int tracepointsstringsindex;
1871 unsigned int mcountindex;
1872 struct module *mod; 1851 struct module *mod;
1873 long err = 0; 1852 long err = 0;
1874 void *percpu = NULL, *ptr = NULL; /* Stops spurious gcc warning */ 1853 void *percpu = NULL, *ptr = NULL; /* Stops spurious gcc warning */
1875 void *mseg; 1854 unsigned long *mseg;
1876 struct exception_table_entry *extable;
1877 mm_segment_t old_fs; 1855 mm_segment_t old_fs;
1878 1856
1879 DEBUGP("load_module: umod=%p, len=%lu, uargs=%p\n", 1857 DEBUGP("load_module: umod=%p, len=%lu, uargs=%p\n",
@@ -1937,6 +1915,7 @@ static noinline struct module *load_module(void __user *umod,
1937 err = -ENOEXEC; 1915 err = -ENOEXEC;
1938 goto free_hdr; 1916 goto free_hdr;
1939 } 1917 }
1918 /* This is temporary: point mod into copy of data. */
1940 mod = (void *)sechdrs[modindex].sh_addr; 1919 mod = (void *)sechdrs[modindex].sh_addr;
1941 1920
1942 if (symindex == 0) { 1921 if (symindex == 0) {
@@ -1946,22 +1925,6 @@ static noinline struct module *load_module(void __user *umod,
1946 goto free_hdr; 1925 goto free_hdr;
1947 } 1926 }
1948 1927
1949 /* Optional sections */
1950 exportindex = find_sec(hdr, sechdrs, secstrings, "__ksymtab");
1951 gplindex = find_sec(hdr, sechdrs, secstrings, "__ksymtab_gpl");
1952 gplfutureindex = find_sec(hdr, sechdrs, secstrings, "__ksymtab_gpl_future");
1953 crcindex = find_sec(hdr, sechdrs, secstrings, "__kcrctab");
1954 gplcrcindex = find_sec(hdr, sechdrs, secstrings, "__kcrctab_gpl");
1955 gplfuturecrcindex = find_sec(hdr, sechdrs, secstrings, "__kcrctab_gpl_future");
1956#ifdef CONFIG_UNUSED_SYMBOLS
1957 unusedindex = find_sec(hdr, sechdrs, secstrings, "__ksymtab_unused");
1958 unusedgplindex = find_sec(hdr, sechdrs, secstrings, "__ksymtab_unused_gpl");
1959 unusedcrcindex = find_sec(hdr, sechdrs, secstrings, "__kcrctab_unused");
1960 unusedgplcrcindex = find_sec(hdr, sechdrs, secstrings, "__kcrctab_unused_gpl");
1961#endif
1962 setupindex = find_sec(hdr, sechdrs, secstrings, "__param");
1963 exindex = find_sec(hdr, sechdrs, secstrings, "__ex_table");
1964 obsparmindex = find_sec(hdr, sechdrs, secstrings, "__obsparm");
1965 versindex = find_sec(hdr, sechdrs, secstrings, "__versions"); 1928 versindex = find_sec(hdr, sechdrs, secstrings, "__versions");
1966 infoindex = find_sec(hdr, sechdrs, secstrings, ".modinfo"); 1929 infoindex = find_sec(hdr, sechdrs, secstrings, ".modinfo");
1967 pcpuindex = find_pcpusec(hdr, sechdrs, secstrings); 1930 pcpuindex = find_pcpusec(hdr, sechdrs, secstrings);
@@ -2117,42 +2080,57 @@ static noinline struct module *load_module(void __user *umod,
2117 if (err < 0) 2080 if (err < 0)
2118 goto cleanup; 2081 goto cleanup;
2119 2082
2120 /* Set up EXPORTed & EXPORT_GPLed symbols (section 0 is 0 length) */ 2083 /* Now we've got everything in the final locations, we can
2121 mod->num_syms = sechdrs[exportindex].sh_size / sizeof(*mod->syms); 2084 * find optional sections. */
2122 mod->syms = (void *)sechdrs[exportindex].sh_addr; 2085 kp = section_objs(hdr, sechdrs, secstrings, "__param", sizeof(*kp),
2123 if (crcindex) 2086 &num_kp);
2124 mod->crcs = (void *)sechdrs[crcindex].sh_addr; 2087 mod->syms = section_objs(hdr, sechdrs, secstrings, "__ksymtab",
2125 mod->num_gpl_syms = sechdrs[gplindex].sh_size / sizeof(*mod->gpl_syms); 2088 sizeof(*mod->syms), &mod->num_syms);
2126 mod->gpl_syms = (void *)sechdrs[gplindex].sh_addr; 2089 mod->crcs = section_addr(hdr, sechdrs, secstrings, "__kcrctab");
2127 if (gplcrcindex) 2090 mod->gpl_syms = section_objs(hdr, sechdrs, secstrings, "__ksymtab_gpl",
2128 mod->gpl_crcs = (void *)sechdrs[gplcrcindex].sh_addr; 2091 sizeof(*mod->gpl_syms),
2129 mod->num_gpl_future_syms = sechdrs[gplfutureindex].sh_size / 2092 &mod->num_gpl_syms);
2130 sizeof(*mod->gpl_future_syms); 2093 mod->gpl_crcs = section_addr(hdr, sechdrs, secstrings, "__kcrctab_gpl");
2131 mod->gpl_future_syms = (void *)sechdrs[gplfutureindex].sh_addr; 2094 mod->gpl_future_syms = section_objs(hdr, sechdrs, secstrings,
2132 if (gplfuturecrcindex) 2095 "__ksymtab_gpl_future",
2133 mod->gpl_future_crcs = (void *)sechdrs[gplfuturecrcindex].sh_addr; 2096 sizeof(*mod->gpl_future_syms),
2097 &mod->num_gpl_future_syms);
2098 mod->gpl_future_crcs = section_addr(hdr, sechdrs, secstrings,
2099 "__kcrctab_gpl_future");
2134 2100
2135#ifdef CONFIG_UNUSED_SYMBOLS 2101#ifdef CONFIG_UNUSED_SYMBOLS
2136 mod->num_unused_syms = sechdrs[unusedindex].sh_size / 2102 mod->unused_syms = section_objs(hdr, sechdrs, secstrings,
2137 sizeof(*mod->unused_syms); 2103 "__ksymtab_unused",
2138 mod->num_unused_gpl_syms = sechdrs[unusedgplindex].sh_size / 2104 sizeof(*mod->unused_syms),
2139 sizeof(*mod->unused_gpl_syms); 2105 &mod->num_unused_syms);
2140 mod->unused_syms = (void *)sechdrs[unusedindex].sh_addr; 2106 mod->unused_crcs = section_addr(hdr, sechdrs, secstrings,
2141 if (unusedcrcindex) 2107 "__kcrctab_unused");
2142 mod->unused_crcs = (void *)sechdrs[unusedcrcindex].sh_addr; 2108 mod->unused_gpl_syms = section_objs(hdr, sechdrs, secstrings,
2143 mod->unused_gpl_syms = (void *)sechdrs[unusedgplindex].sh_addr; 2109 "__ksymtab_unused_gpl",
2144 if (unusedgplcrcindex) 2110 sizeof(*mod->unused_gpl_syms),
2145 mod->unused_gpl_crcs 2111 &mod->num_unused_gpl_syms);
2146 = (void *)sechdrs[unusedgplcrcindex].sh_addr; 2112 mod->unused_gpl_crcs = section_addr(hdr, sechdrs, secstrings,
2113 "__kcrctab_unused_gpl");
2114#endif
2115
2116#ifdef CONFIG_MARKERS
2117 mod->markers = section_objs(hdr, sechdrs, secstrings, "__markers",
2118 sizeof(*mod->markers), &mod->num_markers);
2119#endif
2120#ifdef CONFIG_TRACEPOINTS
2121 mod->tracepoints = section_objs(hdr, sechdrs, secstrings,
2122 "__tracepoints",
2123 sizeof(*mod->tracepoints),
2124 &mod->num_tracepoints);
2147#endif 2125#endif
2148 2126
2149#ifdef CONFIG_MODVERSIONS 2127#ifdef CONFIG_MODVERSIONS
2150 if ((mod->num_syms && !crcindex) 2128 if ((mod->num_syms && !mod->crcs)
2151 || (mod->num_gpl_syms && !gplcrcindex) 2129 || (mod->num_gpl_syms && !mod->gpl_crcs)
2152 || (mod->num_gpl_future_syms && !gplfuturecrcindex) 2130 || (mod->num_gpl_future_syms && !mod->gpl_future_crcs)
2153#ifdef CONFIG_UNUSED_SYMBOLS 2131#ifdef CONFIG_UNUSED_SYMBOLS
2154 || (mod->num_unused_syms && !unusedcrcindex) 2132 || (mod->num_unused_syms && !mod->unused_crcs)
2155 || (mod->num_unused_gpl_syms && !unusedgplcrcindex) 2133 || (mod->num_unused_gpl_syms && !mod->unused_gpl_crcs)
2156#endif 2134#endif
2157 ) { 2135 ) {
2158 printk(KERN_WARNING "%s: No versions for exported symbols.\n", mod->name); 2136 printk(KERN_WARNING "%s: No versions for exported symbols.\n", mod->name);
@@ -2161,16 +2139,6 @@ static noinline struct module *load_module(void __user *umod,
2161 goto cleanup; 2139 goto cleanup;
2162 } 2140 }
2163#endif 2141#endif
2164 markersindex = find_sec(hdr, sechdrs, secstrings, "__markers");
2165 markersstringsindex = find_sec(hdr, sechdrs, secstrings,
2166 "__markers_strings");
2167 verboseindex = find_sec(hdr, sechdrs, secstrings, "__verbose");
2168 tracepointsindex = find_sec(hdr, sechdrs, secstrings, "__tracepoints");
2169 tracepointsstringsindex = find_sec(hdr, sechdrs, secstrings,
2170 "__tracepoints_strings");
2171
2172 mcountindex = find_sec(hdr, sechdrs, secstrings,
2173 "__mcount_loc");
2174 2142
2175 /* Now do relocations. */ 2143 /* Now do relocations. */
2176 for (i = 1; i < hdr->e_shnum; i++) { 2144 for (i = 1; i < hdr->e_shnum; i++) {
@@ -2193,28 +2161,16 @@ static noinline struct module *load_module(void __user *umod,
2193 if (err < 0) 2161 if (err < 0)
2194 goto cleanup; 2162 goto cleanup;
2195 } 2163 }
2196#ifdef CONFIG_MARKERS
2197 mod->markers = (void *)sechdrs[markersindex].sh_addr;
2198 mod->num_markers =
2199 sechdrs[markersindex].sh_size / sizeof(*mod->markers);
2200#endif
2201#ifdef CONFIG_TRACEPOINTS
2202 mod->tracepoints = (void *)sechdrs[tracepointsindex].sh_addr;
2203 mod->num_tracepoints =
2204 sechdrs[tracepointsindex].sh_size / sizeof(*mod->tracepoints);
2205#endif
2206
2207 2164
2208 /* Find duplicate symbols */ 2165 /* Find duplicate symbols */
2209 err = verify_export_symbols(mod); 2166 err = verify_export_symbols(mod);
2210
2211 if (err < 0) 2167 if (err < 0)
2212 goto cleanup; 2168 goto cleanup;
2213 2169
2214 /* Set up and sort exception table */ 2170 /* Set up and sort exception table */
2215 mod->num_exentries = sechdrs[exindex].sh_size / sizeof(*mod->extable); 2171 mod->extable = section_objs(hdr, sechdrs, secstrings, "__ex_table",
2216 mod->extable = extable = (void *)sechdrs[exindex].sh_addr; 2172 sizeof(*mod->extable), &mod->num_exentries);
2217 sort_extable(extable, extable + mod->num_exentries); 2173 sort_extable(mod->extable, mod->extable + mod->num_exentries);
2218 2174
2219 /* Finally, copy percpu area over. */ 2175 /* Finally, copy percpu area over. */
2220 percpu_modcopy(mod->percpu, (void *)sechdrs[pcpuindex].sh_addr, 2176 percpu_modcopy(mod->percpu, (void *)sechdrs[pcpuindex].sh_addr,
@@ -2223,11 +2179,17 @@ static noinline struct module *load_module(void __user *umod,
2223 add_kallsyms(mod, sechdrs, symindex, strindex, secstrings); 2179 add_kallsyms(mod, sechdrs, symindex, strindex, secstrings);
2224 2180
2225 if (!mod->taints) { 2181 if (!mod->taints) {
2182 struct mod_debug *debug;
2183 unsigned int num_debug;
2184
2226#ifdef CONFIG_MARKERS 2185#ifdef CONFIG_MARKERS
2227 marker_update_probe_range(mod->markers, 2186 marker_update_probe_range(mod->markers,
2228 mod->markers + mod->num_markers); 2187 mod->markers + mod->num_markers);
2229#endif 2188#endif
2230 dynamic_printk_setup(sechdrs, verboseindex); 2189 debug = section_objs(hdr, sechdrs, secstrings, "__verbose",
2190 sizeof(*debug), &num_debug);
2191 dynamic_printk_setup(debug, num_debug);
2192
2231#ifdef CONFIG_TRACEPOINTS 2193#ifdef CONFIG_TRACEPOINTS
2232 tracepoint_update_probe_range(mod->tracepoints, 2194 tracepoint_update_probe_range(mod->tracepoints,
2233 mod->tracepoints + mod->num_tracepoints); 2195 mod->tracepoints + mod->num_tracepoints);
@@ -2235,8 +2197,9 @@ static noinline struct module *load_module(void __user *umod,
2235 } 2197 }
2236 2198
2237 /* sechdrs[0].sh_size is always zero */ 2199 /* sechdrs[0].sh_size is always zero */
2238 mseg = (void *)sechdrs[mcountindex].sh_addr; 2200 mseg = section_objs(hdr, sechdrs, secstrings, "__mcount_loc",
2239 ftrace_init_module(mseg, mseg + sechdrs[mcountindex].sh_size); 2201 sizeof(*mseg), &num_mcount);
2202 ftrace_init_module(mseg, mseg + num_mcount);
2240 2203
2241 err = module_finalize(hdr, sechdrs, mod); 2204 err = module_finalize(hdr, sechdrs, mod);
2242 if (err < 0) 2205 if (err < 0)
@@ -2261,30 +2224,24 @@ static noinline struct module *load_module(void __user *umod,
2261 set_fs(old_fs); 2224 set_fs(old_fs);
2262 2225
2263 mod->args = args; 2226 mod->args = args;
2264 if (obsparmindex) 2227 if (section_addr(hdr, sechdrs, secstrings, "__obsparm"))
2265 printk(KERN_WARNING "%s: Ignoring obsolete parameters\n", 2228 printk(KERN_WARNING "%s: Ignoring obsolete parameters\n",
2266 mod->name); 2229 mod->name);
2267 2230
2268 /* Now sew it into the lists so we can get lockdep and oops 2231 /* Now sew it into the lists so we can get lockdep and oops
2269 * info during argument parsing. Noone should access us, since 2232 * info during argument parsing. Noone should access us, since
2270 * strong_try_module_get() will fail. */ 2233 * strong_try_module_get() will fail.
2271 stop_machine(__link_module, mod, NULL); 2234 * lockdep/oops can run asynchronous, so use the RCU list insertion
2272 2235 * function to insert in a way safe to concurrent readers.
2273 /* Size of section 0 is 0, so this works well if no params */ 2236 * The mutex protects against concurrent writers.
2274 err = parse_args(mod->name, mod->args, 2237 */
2275 (struct kernel_param *) 2238 list_add_rcu(&mod->list, &modules);
2276 sechdrs[setupindex].sh_addr, 2239
2277 sechdrs[setupindex].sh_size 2240 err = parse_args(mod->name, mod->args, kp, num_kp, NULL);
2278 / sizeof(struct kernel_param),
2279 NULL);
2280 if (err < 0) 2241 if (err < 0)
2281 goto unlink; 2242 goto unlink;
2282 2243
2283 err = mod_sysfs_setup(mod, 2244 err = mod_sysfs_setup(mod, kp, num_kp);
2284 (struct kernel_param *)
2285 sechdrs[setupindex].sh_addr,
2286 sechdrs[setupindex].sh_size
2287 / sizeof(struct kernel_param));
2288 if (err < 0) 2245 if (err < 0)
2289 goto unlink; 2246 goto unlink;
2290 add_sect_attrs(mod, hdr->e_shnum, secstrings, sechdrs); 2247 add_sect_attrs(mod, hdr->e_shnum, secstrings, sechdrs);
@@ -2473,7 +2430,7 @@ const char *module_address_lookup(unsigned long addr,
2473 const char *ret = NULL; 2430 const char *ret = NULL;
2474 2431
2475 preempt_disable(); 2432 preempt_disable();
2476 list_for_each_entry(mod, &modules, list) { 2433 list_for_each_entry_rcu(mod, &modules, list) {
2477 if (within(addr, mod->module_init, mod->init_size) 2434 if (within(addr, mod->module_init, mod->init_size)
2478 || within(addr, mod->module_core, mod->core_size)) { 2435 || within(addr, mod->module_core, mod->core_size)) {
2479 if (modname) 2436 if (modname)
@@ -2496,7 +2453,7 @@ int lookup_module_symbol_name(unsigned long addr, char *symname)
2496 struct module *mod; 2453 struct module *mod;
2497 2454
2498 preempt_disable(); 2455 preempt_disable();
2499 list_for_each_entry(mod, &modules, list) { 2456 list_for_each_entry_rcu(mod, &modules, list) {
2500 if (within(addr, mod->module_init, mod->init_size) || 2457 if (within(addr, mod->module_init, mod->init_size) ||
2501 within(addr, mod->module_core, mod->core_size)) { 2458 within(addr, mod->module_core, mod->core_size)) {
2502 const char *sym; 2459 const char *sym;
@@ -2520,7 +2477,7 @@ int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size,
2520 struct module *mod; 2477 struct module *mod;
2521 2478
2522 preempt_disable(); 2479 preempt_disable();
2523 list_for_each_entry(mod, &modules, list) { 2480 list_for_each_entry_rcu(mod, &modules, list) {
2524 if (within(addr, mod->module_init, mod->init_size) || 2481 if (within(addr, mod->module_init, mod->init_size) ||
2525 within(addr, mod->module_core, mod->core_size)) { 2482 within(addr, mod->module_core, mod->core_size)) {
2526 const char *sym; 2483 const char *sym;
@@ -2547,7 +2504,7 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
2547 struct module *mod; 2504 struct module *mod;
2548 2505
2549 preempt_disable(); 2506 preempt_disable();
2550 list_for_each_entry(mod, &modules, list) { 2507 list_for_each_entry_rcu(mod, &modules, list) {
2551 if (symnum < mod->num_symtab) { 2508 if (symnum < mod->num_symtab) {
2552 *value = mod->symtab[symnum].st_value; 2509 *value = mod->symtab[symnum].st_value;
2553 *type = mod->symtab[symnum].st_info; 2510 *type = mod->symtab[symnum].st_info;
@@ -2590,7 +2547,7 @@ unsigned long module_kallsyms_lookup_name(const char *name)
2590 ret = mod_find_symname(mod, colon+1); 2547 ret = mod_find_symname(mod, colon+1);
2591 *colon = ':'; 2548 *colon = ':';
2592 } else { 2549 } else {
2593 list_for_each_entry(mod, &modules, list) 2550 list_for_each_entry_rcu(mod, &modules, list)
2594 if ((ret = mod_find_symname(mod, name)) != 0) 2551 if ((ret = mod_find_symname(mod, name)) != 0)
2595 break; 2552 break;
2596 } 2553 }
@@ -2693,7 +2650,7 @@ const struct exception_table_entry *search_module_extables(unsigned long addr)
2693 struct module *mod; 2650 struct module *mod;
2694 2651
2695 preempt_disable(); 2652 preempt_disable();
2696 list_for_each_entry(mod, &modules, list) { 2653 list_for_each_entry_rcu(mod, &modules, list) {
2697 if (mod->num_exentries == 0) 2654 if (mod->num_exentries == 0)
2698 continue; 2655 continue;
2699 2656
@@ -2719,7 +2676,7 @@ int is_module_address(unsigned long addr)
2719 2676
2720 preempt_disable(); 2677 preempt_disable();
2721 2678
2722 list_for_each_entry(mod, &modules, list) { 2679 list_for_each_entry_rcu(mod, &modules, list) {
2723 if (within(addr, mod->module_core, mod->core_size)) { 2680 if (within(addr, mod->module_core, mod->core_size)) {
2724 preempt_enable(); 2681 preempt_enable();
2725 return 1; 2682 return 1;
@@ -2740,7 +2697,7 @@ struct module *__module_text_address(unsigned long addr)
2740 if (addr < module_addr_min || addr > module_addr_max) 2697 if (addr < module_addr_min || addr > module_addr_max)
2741 return NULL; 2698 return NULL;
2742 2699
2743 list_for_each_entry(mod, &modules, list) 2700 list_for_each_entry_rcu(mod, &modules, list)
2744 if (within(addr, mod->module_init, mod->init_text_size) 2701 if (within(addr, mod->module_init, mod->init_text_size)
2745 || within(addr, mod->module_core, mod->core_text_size)) 2702 || within(addr, mod->module_core, mod->core_text_size))
2746 return mod; 2703 return mod;
@@ -2765,8 +2722,11 @@ void print_modules(void)
2765 char buf[8]; 2722 char buf[8];
2766 2723
2767 printk("Modules linked in:"); 2724 printk("Modules linked in:");
2768 list_for_each_entry(mod, &modules, list) 2725 /* Most callers should already have preempt disabled, but make sure */
2726 preempt_disable();
2727 list_for_each_entry_rcu(mod, &modules, list)
2769 printk(" %s%s", mod->name, module_flags(mod, buf)); 2728 printk(" %s%s", mod->name, module_flags(mod, buf));
2729 preempt_enable();
2770 if (last_unloaded_module[0]) 2730 if (last_unloaded_module[0])
2771 printk(" [last unloaded: %s]", last_unloaded_module); 2731 printk(" [last unloaded: %s]", last_unloaded_module);
2772 printk("\n"); 2732 printk("\n");
diff --git a/kernel/panic.c b/kernel/panic.c
index bda561ef3cdf..6513aac8e992 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -34,13 +34,6 @@ ATOMIC_NOTIFIER_HEAD(panic_notifier_list);
34 34
35EXPORT_SYMBOL(panic_notifier_list); 35EXPORT_SYMBOL(panic_notifier_list);
36 36
37static int __init panic_setup(char *str)
38{
39 panic_timeout = simple_strtoul(str, NULL, 0);
40 return 1;
41}
42__setup("panic=", panic_setup);
43
44static long no_blink(long time) 37static long no_blink(long time)
45{ 38{
46 return 0; 39 return 0;
@@ -218,13 +211,6 @@ void add_taint(unsigned flag)
218} 211}
219EXPORT_SYMBOL(add_taint); 212EXPORT_SYMBOL(add_taint);
220 213
221static int __init pause_on_oops_setup(char *str)
222{
223 pause_on_oops = simple_strtoul(str, NULL, 0);
224 return 1;
225}
226__setup("pause_on_oops=", pause_on_oops_setup);
227
228static void spin_msec(int msecs) 214static void spin_msec(int msecs)
229{ 215{
230 int i; 216 int i;
@@ -384,3 +370,6 @@ void __stack_chk_fail(void)
384} 370}
385EXPORT_SYMBOL(__stack_chk_fail); 371EXPORT_SYMBOL(__stack_chk_fail);
386#endif 372#endif
373
374core_param(panic, panic_timeout, int, 0644);
375core_param(pause_on_oops, pause_on_oops, int, 0644);
diff --git a/kernel/params.c b/kernel/params.c
index afc46a23eb6d..b077f1b045d3 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -373,6 +373,8 @@ int param_get_string(char *buffer, struct kernel_param *kp)
373} 373}
374 374
375/* sysfs output in /sys/modules/XYZ/parameters/ */ 375/* sysfs output in /sys/modules/XYZ/parameters/ */
376#define to_module_attr(n) container_of(n, struct module_attribute, attr);
377#define to_module_kobject(n) container_of(n, struct module_kobject, kobj);
376 378
377extern struct kernel_param __start___param[], __stop___param[]; 379extern struct kernel_param __start___param[], __stop___param[];
378 380
@@ -384,6 +386,7 @@ struct param_attribute
384 386
385struct module_param_attrs 387struct module_param_attrs
386{ 388{
389 unsigned int num;
387 struct attribute_group grp; 390 struct attribute_group grp;
388 struct param_attribute attrs[0]; 391 struct param_attribute attrs[0];
389}; 392};
@@ -434,69 +437,84 @@ static ssize_t param_attr_store(struct module_attribute *mattr,
434 437
435#ifdef CONFIG_SYSFS 438#ifdef CONFIG_SYSFS
436/* 439/*
437 * param_sysfs_setup - setup sysfs support for one module or KBUILD_MODNAME 440 * add_sysfs_param - add a parameter to sysfs
438 * @mk: struct module_kobject (contains parent kobject) 441 * @mk: struct module_kobject
439 * @kparam: array of struct kernel_param, the actual parameter definitions 442 * @kparam: the actual parameter definition to add to sysfs
440 * @num_params: number of entries in array 443 * @name: name of parameter
441 * @name_skip: offset where the parameter name start in kparam[].name. Needed for built-in "modules"
442 * 444 *
443 * Create a kobject for a (per-module) group of parameters, and create files 445 * Create a kobject if for a (per-module) parameter if mp NULL, and
444 * in sysfs. A pointer to the param_kobject is returned on success, 446 * create file in sysfs. Returns an error on out of memory. Always cleans up
445 * NULL if there's no parameter to export, or other ERR_PTR(err). 447 * if there's an error.
446 */ 448 */
447static __modinit struct module_param_attrs * 449static __modinit int add_sysfs_param(struct module_kobject *mk,
448param_sysfs_setup(struct module_kobject *mk, 450 struct kernel_param *kp,
449 struct kernel_param *kparam, 451 const char *name)
450 unsigned int num_params,
451 unsigned int name_skip)
452{ 452{
453 struct module_param_attrs *mp; 453 struct module_param_attrs *new;
454 unsigned int valid_attrs = 0; 454 struct attribute **attrs;
455 unsigned int i, size[2]; 455 int err, num;
456 struct param_attribute *pattr; 456
457 struct attribute **gattr; 457 /* We don't bother calling this with invisible parameters. */
458 int err; 458 BUG_ON(!kp->perm);
459 459
460 for (i=0; i<num_params; i++) { 460 if (!mk->mp) {
461 if (kparam[i].perm) 461 num = 0;
462 valid_attrs++; 462 attrs = NULL;
463 } else {
464 num = mk->mp->num;
465 attrs = mk->mp->grp.attrs;
463 } 466 }
464 467
465 if (!valid_attrs) 468 /* Enlarge. */
466 return NULL; 469 new = krealloc(mk->mp,
467 470 sizeof(*mk->mp) + sizeof(mk->mp->attrs[0]) * (num+1),
468 size[0] = ALIGN(sizeof(*mp) + 471 GFP_KERNEL);
469 valid_attrs * sizeof(mp->attrs[0]), 472 if (!new) {
470 sizeof(mp->grp.attrs[0])); 473 kfree(mk->mp);
471 size[1] = (valid_attrs + 1) * sizeof(mp->grp.attrs[0]); 474 err = -ENOMEM;
472 475 goto fail;
473 mp = kzalloc(size[0] + size[1], GFP_KERNEL); 476 }
474 if (!mp) 477 attrs = krealloc(attrs, sizeof(new->grp.attrs[0])*(num+2), GFP_KERNEL);
475 return ERR_PTR(-ENOMEM); 478 if (!attrs) {
479 err = -ENOMEM;
480 goto fail_free_new;
481 }
476 482
477 mp->grp.name = "parameters"; 483 /* Sysfs wants everything zeroed. */
478 mp->grp.attrs = (void *)mp + size[0]; 484 memset(new, 0, sizeof(*new));
485 memset(&new->attrs[num], 0, sizeof(new->attrs[num]));
486 memset(&attrs[num], 0, sizeof(attrs[num]));
487 new->grp.name = "parameters";
488 new->grp.attrs = attrs;
489
490 /* Tack new one on the end. */
491 new->attrs[num].param = kp;
492 new->attrs[num].mattr.show = param_attr_show;
493 new->attrs[num].mattr.store = param_attr_store;
494 new->attrs[num].mattr.attr.name = (char *)name;
495 new->attrs[num].mattr.attr.mode = kp->perm;
496 new->num = num+1;
497
498 /* Fix up all the pointers, since krealloc can move us */
499 for (num = 0; num < new->num; num++)
500 new->grp.attrs[num] = &new->attrs[num].mattr.attr;
501 new->grp.attrs[num] = NULL;
502
503 mk->mp = new;
504 return 0;
479 505
480 pattr = &mp->attrs[0]; 506fail_free_new:
481 gattr = &mp->grp.attrs[0]; 507 kfree(new);
482 for (i = 0; i < num_params; i++) { 508fail:
483 struct kernel_param *kp = &kparam[i]; 509 mk->mp = NULL;
484 if (kp->perm) { 510 return err;
485 pattr->param = kp; 511}
486 pattr->mattr.show = param_attr_show;
487 pattr->mattr.store = param_attr_store;
488 pattr->mattr.attr.name = (char *)&kp->name[name_skip];
489 pattr->mattr.attr.mode = kp->perm;
490 *(gattr++) = &(pattr++)->mattr.attr;
491 }
492 }
493 *gattr = NULL;
494 512
495 if ((err = sysfs_create_group(&mk->kobj, &mp->grp))) { 513static void free_module_param_attrs(struct module_kobject *mk)
496 kfree(mp); 514{
497 return ERR_PTR(err); 515 kfree(mk->mp->grp.attrs);
498 } 516 kfree(mk->mp);
499 return mp; 517 mk->mp = NULL;
500} 518}
501 519
502#ifdef CONFIG_MODULES 520#ifdef CONFIG_MODULES
@@ -506,21 +524,33 @@ param_sysfs_setup(struct module_kobject *mk,
506 * @kparam: module parameters (array) 524 * @kparam: module parameters (array)
507 * @num_params: number of module parameters 525 * @num_params: number of module parameters
508 * 526 *
509 * Adds sysfs entries for module parameters, and creates a link from 527 * Adds sysfs entries for module parameters under
510 * /sys/module/[mod->name]/parameters to /sys/parameters/[mod->name]/ 528 * /sys/module/[mod->name]/parameters/
511 */ 529 */
512int module_param_sysfs_setup(struct module *mod, 530int module_param_sysfs_setup(struct module *mod,
513 struct kernel_param *kparam, 531 struct kernel_param *kparam,
514 unsigned int num_params) 532 unsigned int num_params)
515{ 533{
516 struct module_param_attrs *mp; 534 int i, err;
535 bool params = false;
536
537 for (i = 0; i < num_params; i++) {
538 if (kparam[i].perm == 0)
539 continue;
540 err = add_sysfs_param(&mod->mkobj, &kparam[i], kparam[i].name);
541 if (err)
542 return err;
543 params = true;
544 }
517 545
518 mp = param_sysfs_setup(&mod->mkobj, kparam, num_params, 0); 546 if (!params)
519 if (IS_ERR(mp)) 547 return 0;
520 return PTR_ERR(mp);
521 548
522 mod->param_attrs = mp; 549 /* Create the param group. */
523 return 0; 550 err = sysfs_create_group(&mod->mkobj.kobj, &mod->mkobj.mp->grp);
551 if (err)
552 free_module_param_attrs(&mod->mkobj);
553 return err;
524} 554}
525 555
526/* 556/*
@@ -532,43 +562,55 @@ int module_param_sysfs_setup(struct module *mod,
532 */ 562 */
533void module_param_sysfs_remove(struct module *mod) 563void module_param_sysfs_remove(struct module *mod)
534{ 564{
535 if (mod->param_attrs) { 565 if (mod->mkobj.mp) {
536 sysfs_remove_group(&mod->mkobj.kobj, 566 sysfs_remove_group(&mod->mkobj.kobj, &mod->mkobj.mp->grp);
537 &mod->param_attrs->grp);
538 /* We are positive that no one is using any param 567 /* We are positive that no one is using any param
539 * attrs at this point. Deallocate immediately. */ 568 * attrs at this point. Deallocate immediately. */
540 kfree(mod->param_attrs); 569 free_module_param_attrs(&mod->mkobj);
541 mod->param_attrs = NULL;
542 } 570 }
543} 571}
544#endif 572#endif
545 573
546/* 574static void __init kernel_add_sysfs_param(const char *name,
547 * kernel_param_sysfs_setup - wrapper for built-in params support 575 struct kernel_param *kparam,
548 */ 576 unsigned int name_skip)
549static void __init kernel_param_sysfs_setup(const char *name,
550 struct kernel_param *kparam,
551 unsigned int num_params,
552 unsigned int name_skip)
553{ 577{
554 struct module_kobject *mk; 578 struct module_kobject *mk;
555 int ret; 579 struct kobject *kobj;
580 int err;
556 581
557 mk = kzalloc(sizeof(struct module_kobject), GFP_KERNEL); 582 kobj = kset_find_obj(module_kset, name);
558 BUG_ON(!mk); 583 if (kobj) {
559 584 /* We already have one. Remove params so we can add more. */
560 mk->mod = THIS_MODULE; 585 mk = to_module_kobject(kobj);
561 mk->kobj.kset = module_kset; 586 /* We need to remove it before adding parameters. */
562 ret = kobject_init_and_add(&mk->kobj, &module_ktype, NULL, "%s", name); 587 sysfs_remove_group(&mk->kobj, &mk->mp->grp);
563 if (ret) { 588 } else {
564 kobject_put(&mk->kobj); 589 mk = kzalloc(sizeof(struct module_kobject), GFP_KERNEL);
565 printk(KERN_ERR "Module '%s' failed to be added to sysfs, " 590 BUG_ON(!mk);
566 "error number %d\n", name, ret); 591
567 printk(KERN_ERR "The system will be unstable now.\n"); 592 mk->mod = THIS_MODULE;
568 return; 593 mk->kobj.kset = module_kset;
594 err = kobject_init_and_add(&mk->kobj, &module_ktype, NULL,
595 "%s", name);
596 if (err) {
597 kobject_put(&mk->kobj);
598 printk(KERN_ERR "Module '%s' failed add to sysfs, "
599 "error number %d\n", name, err);
600 printk(KERN_ERR "The system will be unstable now.\n");
601 return;
602 }
603 /* So that exit path is even. */
604 kobject_get(&mk->kobj);
569 } 605 }
570 param_sysfs_setup(mk, kparam, num_params, name_skip); 606
607 /* These should not fail at boot. */
608 err = add_sysfs_param(mk, kparam, kparam->name + name_skip);
609 BUG_ON(err);
610 err = sysfs_create_group(&mk->kobj, &mk->mp->grp);
611 BUG_ON(err);
571 kobject_uevent(&mk->kobj, KOBJ_ADD); 612 kobject_uevent(&mk->kobj, KOBJ_ADD);
613 kobject_put(&mk->kobj);
572} 614}
573 615
574/* 616/*
@@ -579,60 +621,36 @@ static void __init kernel_param_sysfs_setup(const char *name,
579 * The "module" name (KBUILD_MODNAME) is stored before a dot, the 621 * The "module" name (KBUILD_MODNAME) is stored before a dot, the
580 * "parameter" name is stored behind a dot in kernel_param->name. So, 622 * "parameter" name is stored behind a dot in kernel_param->name. So,
581 * extract the "module" name for all built-in kernel_param-eters, 623 * extract the "module" name for all built-in kernel_param-eters,
582 * and for all who have the same, call kernel_param_sysfs_setup. 624 * and for all who have the same, call kernel_add_sysfs_param.
583 */ 625 */
584static void __init param_sysfs_builtin(void) 626static void __init param_sysfs_builtin(void)
585{ 627{
586 struct kernel_param *kp, *kp_begin = NULL; 628 struct kernel_param *kp;
587 unsigned int i, name_len, count = 0; 629 unsigned int name_len;
588 char modname[MODULE_NAME_LEN + 1] = ""; 630 char modname[MODULE_NAME_LEN];
589 631
590 for (i=0; i < __stop___param - __start___param; i++) { 632 for (kp = __start___param; kp < __stop___param; kp++) {
591 char *dot; 633 char *dot;
592 size_t max_name_len;
593 634
594 kp = &__start___param[i]; 635 if (kp->perm == 0)
595 max_name_len = 636 continue;
596 min_t(size_t, MODULE_NAME_LEN, strlen(kp->name));
597 637
598 dot = memchr(kp->name, '.', max_name_len); 638 dot = strchr(kp->name, '.');
599 if (!dot) { 639 if (!dot) {
600 DEBUGP("couldn't find period in first %d characters " 640 /* This happens for core_param() */
601 "of %s\n", MODULE_NAME_LEN, kp->name); 641 strcpy(modname, "kernel");
602 continue; 642 name_len = 0;
603 } 643 } else {
604 name_len = dot - kp->name; 644 name_len = dot - kp->name + 1;
605 645 strlcpy(modname, kp->name, name_len);
606 /* new kbuild_modname? */
607 if (strlen(modname) != name_len
608 || strncmp(modname, kp->name, name_len) != 0) {
609 /* add a new kobject for previous kernel_params. */
610 if (count)
611 kernel_param_sysfs_setup(modname,
612 kp_begin,
613 count,
614 strlen(modname)+1);
615
616 strncpy(modname, kp->name, name_len);
617 modname[name_len] = '\0';
618 count = 0;
619 kp_begin = kp;
620 } 646 }
621 count++; 647 kernel_add_sysfs_param(modname, kp, name_len);
622 } 648 }
623
624 /* last kernel_params need to be registered as well */
625 if (count)
626 kernel_param_sysfs_setup(modname, kp_begin, count,
627 strlen(modname)+1);
628} 649}
629 650
630 651
631/* module-related sysfs stuff */ 652/* module-related sysfs stuff */
632 653
633#define to_module_attr(n) container_of(n, struct module_attribute, attr);
634#define to_module_kobject(n) container_of(n, struct module_kobject, kobj);
635
636static ssize_t module_attr_show(struct kobject *kobj, 654static ssize_t module_attr_show(struct kobject *kobj,
637 struct attribute *attr, 655 struct attribute *attr,
638 char *buf) 656 char *buf)
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 467d5940f624..ad63af8b2521 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -119,18 +119,19 @@ static void _rcu_barrier(enum rcu_barrier type)
119 /* Take cpucontrol mutex to protect against CPU hotplug */ 119 /* Take cpucontrol mutex to protect against CPU hotplug */
120 mutex_lock(&rcu_barrier_mutex); 120 mutex_lock(&rcu_barrier_mutex);
121 init_completion(&rcu_barrier_completion); 121 init_completion(&rcu_barrier_completion);
122 atomic_set(&rcu_barrier_cpu_count, 0);
123 /* 122 /*
124 * The queueing of callbacks in all CPUs must be atomic with 123 * Initialize rcu_barrier_cpu_count to 1, then invoke
125 * respect to RCU, otherwise one CPU may queue a callback, 124 * rcu_barrier_func() on each CPU, so that each CPU also has
126 * wait for a grace period, decrement barrier count and call 125 * incremented rcu_barrier_cpu_count. Only then is it safe to
127 * complete(), while other CPUs have not yet queued anything. 126 * decrement rcu_barrier_cpu_count -- otherwise the first CPU
128 * So, we need to make sure that grace periods cannot complete 127 * might complete its grace period before all of the other CPUs
129 * until all the callbacks are queued. 128 * did their increment, causing this function to return too
129 * early.
130 */ 130 */
131 rcu_read_lock(); 131 atomic_set(&rcu_barrier_cpu_count, 1);
132 on_each_cpu(rcu_barrier_func, (void *)type, 1); 132 on_each_cpu(rcu_barrier_func, (void *)type, 1);
133 rcu_read_unlock(); 133 if (atomic_dec_and_test(&rcu_barrier_cpu_count))
134 complete(&rcu_barrier_completion);
134 wait_for_completion(&rcu_barrier_completion); 135 wait_for_completion(&rcu_barrier_completion);
135 mutex_unlock(&rcu_barrier_mutex); 136 mutex_unlock(&rcu_barrier_mutex);
136} 137}
diff --git a/kernel/sched.c b/kernel/sched.c
index d906f72b42d2..945a97b9600d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -819,6 +819,13 @@ const_debug unsigned int sysctl_sched_nr_migrate = 32;
819unsigned int sysctl_sched_shares_ratelimit = 250000; 819unsigned int sysctl_sched_shares_ratelimit = 250000;
820 820
821/* 821/*
822 * Inject some fuzzyness into changing the per-cpu group shares
823 * this avoids remote rq-locks at the expense of fairness.
824 * default: 4
825 */
826unsigned int sysctl_sched_shares_thresh = 4;
827
828/*
822 * period over which we measure -rt task cpu usage in us. 829 * period over which we measure -rt task cpu usage in us.
823 * default: 1s 830 * default: 1s
824 */ 831 */
@@ -1454,8 +1461,8 @@ static void __set_se_shares(struct sched_entity *se, unsigned long shares);
1454 * Calculate and set the cpu's group shares. 1461 * Calculate and set the cpu's group shares.
1455 */ 1462 */
1456static void 1463static void
1457__update_group_shares_cpu(struct task_group *tg, int cpu, 1464update_group_shares_cpu(struct task_group *tg, int cpu,
1458 unsigned long sd_shares, unsigned long sd_rq_weight) 1465 unsigned long sd_shares, unsigned long sd_rq_weight)
1459{ 1466{
1460 int boost = 0; 1467 int boost = 0;
1461 unsigned long shares; 1468 unsigned long shares;
@@ -1486,19 +1493,23 @@ __update_group_shares_cpu(struct task_group *tg, int cpu,
1486 * 1493 *
1487 */ 1494 */
1488 shares = (sd_shares * rq_weight) / (sd_rq_weight + 1); 1495 shares = (sd_shares * rq_weight) / (sd_rq_weight + 1);
1496 shares = clamp_t(unsigned long, shares, MIN_SHARES, MAX_SHARES);
1489 1497
1490 /* 1498 if (abs(shares - tg->se[cpu]->load.weight) >
1491 * record the actual number of shares, not the boosted amount. 1499 sysctl_sched_shares_thresh) {
1492 */ 1500 struct rq *rq = cpu_rq(cpu);
1493 tg->cfs_rq[cpu]->shares = boost ? 0 : shares; 1501 unsigned long flags;
1494 tg->cfs_rq[cpu]->rq_weight = rq_weight;
1495 1502
1496 if (shares < MIN_SHARES) 1503 spin_lock_irqsave(&rq->lock, flags);
1497 shares = MIN_SHARES; 1504 /*
1498 else if (shares > MAX_SHARES) 1505 * record the actual number of shares, not the boosted amount.
1499 shares = MAX_SHARES; 1506 */
1507 tg->cfs_rq[cpu]->shares = boost ? 0 : shares;
1508 tg->cfs_rq[cpu]->rq_weight = rq_weight;
1500 1509
1501 __set_se_shares(tg->se[cpu], shares); 1510 __set_se_shares(tg->se[cpu], shares);
1511 spin_unlock_irqrestore(&rq->lock, flags);
1512 }
1502} 1513}
1503 1514
1504/* 1515/*
@@ -1527,14 +1538,8 @@ static int tg_shares_up(struct task_group *tg, void *data)
1527 if (!rq_weight) 1538 if (!rq_weight)
1528 rq_weight = cpus_weight(sd->span) * NICE_0_LOAD; 1539 rq_weight = cpus_weight(sd->span) * NICE_0_LOAD;
1529 1540
1530 for_each_cpu_mask(i, sd->span) { 1541 for_each_cpu_mask(i, sd->span)
1531 struct rq *rq = cpu_rq(i); 1542 update_group_shares_cpu(tg, i, shares, rq_weight);
1532 unsigned long flags;
1533
1534 spin_lock_irqsave(&rq->lock, flags);
1535 __update_group_shares_cpu(tg, i, shares, rq_weight);
1536 spin_unlock_irqrestore(&rq->lock, flags);
1537 }
1538 1543
1539 return 0; 1544 return 0;
1540} 1545}
@@ -4443,12 +4448,8 @@ need_resched_nonpreemptible:
4443 if (sched_feat(HRTICK)) 4448 if (sched_feat(HRTICK))
4444 hrtick_clear(rq); 4449 hrtick_clear(rq);
4445 4450
4446 /* 4451 spin_lock_irq(&rq->lock);
4447 * Do the rq-clock update outside the rq lock:
4448 */
4449 local_irq_disable();
4450 update_rq_clock(rq); 4452 update_rq_clock(rq);
4451 spin_lock(&rq->lock);
4452 clear_tsk_need_resched(prev); 4453 clear_tsk_need_resched(prev);
4453 4454
4454 if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { 4455 if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index f604dae71316..9573c33688b8 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -73,6 +73,8 @@ unsigned int sysctl_sched_wakeup_granularity = 5000000UL;
73 73
74const_debug unsigned int sysctl_sched_migration_cost = 500000UL; 74const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
75 75
76static const struct sched_class fair_sched_class;
77
76/************************************************************** 78/**************************************************************
77 * CFS operations on generic schedulable entities: 79 * CFS operations on generic schedulable entities:
78 */ 80 */
@@ -334,7 +336,7 @@ int sched_nr_latency_handler(struct ctl_table *table, int write,
334#endif 336#endif
335 337
336/* 338/*
337 * delta *= w / rw 339 * delta *= P[w / rw]
338 */ 340 */
339static inline unsigned long 341static inline unsigned long
340calc_delta_weight(unsigned long delta, struct sched_entity *se) 342calc_delta_weight(unsigned long delta, struct sched_entity *se)
@@ -348,15 +350,13 @@ calc_delta_weight(unsigned long delta, struct sched_entity *se)
348} 350}
349 351
350/* 352/*
351 * delta *= rw / w 353 * delta /= w
352 */ 354 */
353static inline unsigned long 355static inline unsigned long
354calc_delta_fair(unsigned long delta, struct sched_entity *se) 356calc_delta_fair(unsigned long delta, struct sched_entity *se)
355{ 357{
356 for_each_sched_entity(se) { 358 if (unlikely(se->load.weight != NICE_0_LOAD))
357 delta = calc_delta_mine(delta, 359 delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load);
358 cfs_rq_of(se)->load.weight, &se->load);
359 }
360 360
361 return delta; 361 return delta;
362} 362}
@@ -386,26 +386,26 @@ static u64 __sched_period(unsigned long nr_running)
386 * We calculate the wall-time slice from the period by taking a part 386 * We calculate the wall-time slice from the period by taking a part
387 * proportional to the weight. 387 * proportional to the weight.
388 * 388 *
389 * s = p*w/rw 389 * s = p*P[w/rw]
390 */ 390 */
391static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se) 391static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
392{ 392{
393 return calc_delta_weight(__sched_period(cfs_rq->nr_running), se); 393 unsigned long nr_running = cfs_rq->nr_running;
394
395 if (unlikely(!se->on_rq))
396 nr_running++;
397
398 return calc_delta_weight(__sched_period(nr_running), se);
394} 399}
395 400
396/* 401/*
397 * We calculate the vruntime slice of a to be inserted task 402 * We calculate the vruntime slice of a to be inserted task
398 * 403 *
399 * vs = s*rw/w = p 404 * vs = s/w
400 */ 405 */
401static u64 sched_vslice_add(struct cfs_rq *cfs_rq, struct sched_entity *se) 406static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
402{ 407{
403 unsigned long nr_running = cfs_rq->nr_running; 408 return calc_delta_fair(sched_slice(cfs_rq, se), se);
404
405 if (!se->on_rq)
406 nr_running++;
407
408 return __sched_period(nr_running);
409} 409}
410 410
411/* 411/*
@@ -628,7 +628,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
628 * stays open at the end. 628 * stays open at the end.
629 */ 629 */
630 if (initial && sched_feat(START_DEBIT)) 630 if (initial && sched_feat(START_DEBIT))
631 vruntime += sched_vslice_add(cfs_rq, se); 631 vruntime += sched_vslice(cfs_rq, se);
632 632
633 if (!initial) { 633 if (!initial) {
634 /* sleeps upto a single latency don't count. */ 634 /* sleeps upto a single latency don't count. */
@@ -748,7 +748,7 @@ pick_next(struct cfs_rq *cfs_rq, struct sched_entity *se)
748 struct rq *rq = rq_of(cfs_rq); 748 struct rq *rq = rq_of(cfs_rq);
749 u64 pair_slice = rq->clock - cfs_rq->pair_start; 749 u64 pair_slice = rq->clock - cfs_rq->pair_start;
750 750
751 if (!cfs_rq->next || pair_slice > sched_slice(cfs_rq, cfs_rq->next)) { 751 if (!cfs_rq->next || pair_slice > sysctl_sched_min_granularity) {
752 cfs_rq->pair_start = rq->clock; 752 cfs_rq->pair_start = rq->clock;
753 return se; 753 return se;
754 } 754 }
@@ -849,11 +849,31 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
849 hrtick_start(rq, delta); 849 hrtick_start(rq, delta);
850 } 850 }
851} 851}
852
853/*
854 * called from enqueue/dequeue and updates the hrtick when the
855 * current task is from our class and nr_running is low enough
856 * to matter.
857 */
858static void hrtick_update(struct rq *rq)
859{
860 struct task_struct *curr = rq->curr;
861
862 if (curr->sched_class != &fair_sched_class)
863 return;
864
865 if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
866 hrtick_start_fair(rq, curr);
867}
852#else /* !CONFIG_SCHED_HRTICK */ 868#else /* !CONFIG_SCHED_HRTICK */
853static inline void 869static inline void
854hrtick_start_fair(struct rq *rq, struct task_struct *p) 870hrtick_start_fair(struct rq *rq, struct task_struct *p)
855{ 871{
856} 872}
873
874static inline void hrtick_update(struct rq *rq)
875{
876}
857#endif 877#endif
858 878
859/* 879/*
@@ -874,7 +894,7 @@ static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
874 wakeup = 1; 894 wakeup = 1;
875 } 895 }
876 896
877 hrtick_start_fair(rq, rq->curr); 897 hrtick_update(rq);
878} 898}
879 899
880/* 900/*
@@ -896,7 +916,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
896 sleep = 1; 916 sleep = 1;
897 } 917 }
898 918
899 hrtick_start_fair(rq, rq->curr); 919 hrtick_update(rq);
900} 920}
901 921
902/* 922/*
@@ -1002,8 +1022,6 @@ static inline int wake_idle(int cpu, struct task_struct *p)
1002 1022
1003#ifdef CONFIG_SMP 1023#ifdef CONFIG_SMP
1004 1024
1005static const struct sched_class fair_sched_class;
1006
1007#ifdef CONFIG_FAIR_GROUP_SCHED 1025#ifdef CONFIG_FAIR_GROUP_SCHED
1008/* 1026/*
1009 * effective_load() calculates the load change as seen from the root_task_group 1027 * effective_load() calculates the load change as seen from the root_task_group
diff --git a/kernel/sched_features.h b/kernel/sched_features.h
index 7c9e8f4a049f..fda016218296 100644
--- a/kernel/sched_features.h
+++ b/kernel/sched_features.h
@@ -5,7 +5,7 @@ SCHED_FEAT(START_DEBIT, 1)
5SCHED_FEAT(AFFINE_WAKEUPS, 1) 5SCHED_FEAT(AFFINE_WAKEUPS, 1)
6SCHED_FEAT(CACHE_HOT_BUDDY, 1) 6SCHED_FEAT(CACHE_HOT_BUDDY, 1)
7SCHED_FEAT(SYNC_WAKEUPS, 1) 7SCHED_FEAT(SYNC_WAKEUPS, 1)
8SCHED_FEAT(HRTICK, 1) 8SCHED_FEAT(HRTICK, 0)
9SCHED_FEAT(DOUBLE_TICK, 0) 9SCHED_FEAT(DOUBLE_TICK, 0)
10SCHED_FEAT(ASYM_GRAN, 1) 10SCHED_FEAT(ASYM_GRAN, 1)
11SCHED_FEAT(LB_BIAS, 1) 11SCHED_FEAT(LB_BIAS, 1)
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index b8c156979cf2..2df9d297d292 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -9,7 +9,7 @@
9static int show_schedstat(struct seq_file *seq, void *v) 9static int show_schedstat(struct seq_file *seq, void *v)
10{ 10{
11 int cpu; 11 int cpu;
12 int mask_len = NR_CPUS/32 * 9; 12 int mask_len = DIV_ROUND_UP(NR_CPUS, 32) * 9;
13 char *mask_str = kmalloc(mask_len, GFP_KERNEL); 13 char *mask_str = kmalloc(mask_len, GFP_KERNEL);
14 14
15 if (mask_str == NULL) 15 if (mask_str == NULL)
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index af3c7cea258b..8aff79d90ddc 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -37,9 +37,13 @@ struct stop_machine_data {
37/* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */ 37/* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */
38static unsigned int num_threads; 38static unsigned int num_threads;
39static atomic_t thread_ack; 39static atomic_t thread_ack;
40static struct completion finished;
41static DEFINE_MUTEX(lock); 40static DEFINE_MUTEX(lock);
42 41
42static struct workqueue_struct *stop_machine_wq;
43static struct stop_machine_data active, idle;
44static const cpumask_t *active_cpus;
45static void *stop_machine_work;
46
43static void set_state(enum stopmachine_state newstate) 47static void set_state(enum stopmachine_state newstate)
44{ 48{
45 /* Reset ack counter. */ 49 /* Reset ack counter. */
@@ -51,21 +55,26 @@ static void set_state(enum stopmachine_state newstate)
51/* Last one to ack a state moves to the next state. */ 55/* Last one to ack a state moves to the next state. */
52static void ack_state(void) 56static void ack_state(void)
53{ 57{
54 if (atomic_dec_and_test(&thread_ack)) { 58 if (atomic_dec_and_test(&thread_ack))
55 /* If we're the last one to ack the EXIT, we're finished. */ 59 set_state(state + 1);
56 if (state == STOPMACHINE_EXIT)
57 complete(&finished);
58 else
59 set_state(state + 1);
60 }
61} 60}
62 61
63/* This is the actual thread which stops the CPU. It exits by itself rather 62/* This is the actual function which stops the CPU. It runs
64 * than waiting for kthread_stop(), because it's easier for hotplug CPU. */ 63 * in the context of a dedicated stopmachine workqueue. */
65static int stop_cpu(struct stop_machine_data *smdata) 64static void stop_cpu(struct work_struct *unused)
66{ 65{
67 enum stopmachine_state curstate = STOPMACHINE_NONE; 66 enum stopmachine_state curstate = STOPMACHINE_NONE;
68 67 struct stop_machine_data *smdata = &idle;
68 int cpu = smp_processor_id();
69 int err;
70
71 if (!active_cpus) {
72 if (cpu == first_cpu(cpu_online_map))
73 smdata = &active;
74 } else {
75 if (cpu_isset(cpu, *active_cpus))
76 smdata = &active;
77 }
69 /* Simple state machine */ 78 /* Simple state machine */
70 do { 79 do {
71 /* Chill out and ensure we re-read stopmachine_state. */ 80 /* Chill out and ensure we re-read stopmachine_state. */
@@ -78,9 +87,11 @@ static int stop_cpu(struct stop_machine_data *smdata)
78 hard_irq_disable(); 87 hard_irq_disable();
79 break; 88 break;
80 case STOPMACHINE_RUN: 89 case STOPMACHINE_RUN:
81 /* |= allows error detection if functions on 90 /* On multiple CPUs only a single error code
82 * multiple CPUs. */ 91 * is needed to tell that something failed. */
83 smdata->fnret |= smdata->fn(smdata->data); 92 err = smdata->fn(smdata->data);
93 if (err)
94 smdata->fnret = err;
84 break; 95 break;
85 default: 96 default:
86 break; 97 break;
@@ -90,7 +101,6 @@ static int stop_cpu(struct stop_machine_data *smdata)
90 } while (curstate != STOPMACHINE_EXIT); 101 } while (curstate != STOPMACHINE_EXIT);
91 102
92 local_irq_enable(); 103 local_irq_enable();
93 do_exit(0);
94} 104}
95 105
96/* Callback for CPUs which aren't supposed to do anything. */ 106/* Callback for CPUs which aren't supposed to do anything. */
@@ -101,78 +111,34 @@ static int chill(void *unused)
101 111
102int __stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus) 112int __stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus)
103{ 113{
104 int i, err; 114 struct work_struct *sm_work;
105 struct stop_machine_data active, idle; 115 int i;
106 struct task_struct **threads;
107 116
117 /* Set up initial state. */
118 mutex_lock(&lock);
119 num_threads = num_online_cpus();
120 active_cpus = cpus;
108 active.fn = fn; 121 active.fn = fn;
109 active.data = data; 122 active.data = data;
110 active.fnret = 0; 123 active.fnret = 0;
111 idle.fn = chill; 124 idle.fn = chill;
112 idle.data = NULL; 125 idle.data = NULL;
113 126
114 /* This could be too big for stack on large machines. */
115 threads = kcalloc(NR_CPUS, sizeof(threads[0]), GFP_KERNEL);
116 if (!threads)
117 return -ENOMEM;
118
119 /* Set up initial state. */
120 mutex_lock(&lock);
121 init_completion(&finished);
122 num_threads = num_online_cpus();
123 set_state(STOPMACHINE_PREPARE); 127 set_state(STOPMACHINE_PREPARE);
124 128
125 for_each_online_cpu(i) { 129 /* Schedule the stop_cpu work on all cpus: hold this CPU so one
126 struct stop_machine_data *smdata = &idle;
127 struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
128
129 if (!cpus) {
130 if (i == first_cpu(cpu_online_map))
131 smdata = &active;
132 } else {
133 if (cpu_isset(i, *cpus))
134 smdata = &active;
135 }
136
137 threads[i] = kthread_create((void *)stop_cpu, smdata, "kstop%u",
138 i);
139 if (IS_ERR(threads[i])) {
140 err = PTR_ERR(threads[i]);
141 threads[i] = NULL;
142 goto kill_threads;
143 }
144
145 /* Place it onto correct cpu. */
146 kthread_bind(threads[i], i);
147
148 /* Make it highest prio. */
149 if (sched_setscheduler_nocheck(threads[i], SCHED_FIFO, &param))
150 BUG();
151 }
152
153 /* We've created all the threads. Wake them all: hold this CPU so one
154 * doesn't hit this CPU until we're ready. */ 130 * doesn't hit this CPU until we're ready. */
155 get_cpu(); 131 get_cpu();
156 for_each_online_cpu(i) 132 for_each_online_cpu(i) {
157 wake_up_process(threads[i]); 133 sm_work = percpu_ptr(stop_machine_work, i);
158 134 INIT_WORK(sm_work, stop_cpu);
135 queue_work_on(i, stop_machine_wq, sm_work);
136 }
159 /* This will release the thread on our CPU. */ 137 /* This will release the thread on our CPU. */
160 put_cpu(); 138 put_cpu();
161 wait_for_completion(&finished); 139 flush_workqueue(stop_machine_wq);
162 mutex_unlock(&lock); 140 mutex_unlock(&lock);
163
164 kfree(threads);
165
166 return active.fnret; 141 return active.fnret;
167
168kill_threads:
169 for_each_online_cpu(i)
170 if (threads[i])
171 kthread_stop(threads[i]);
172 mutex_unlock(&lock);
173
174 kfree(threads);
175 return err;
176} 142}
177 143
178int stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus) 144int stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus)
@@ -187,3 +153,11 @@ int stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus)
187 return ret; 153 return ret;
188} 154}
189EXPORT_SYMBOL_GPL(stop_machine); 155EXPORT_SYMBOL_GPL(stop_machine);
156
157static int __init stop_machine_init(void)
158{
159 stop_machine_wq = create_rt_workqueue("kstop");
160 stop_machine_work = alloc_percpu(struct work_struct);
161 return 0;
162}
163early_initcall(stop_machine_init);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index b3cc73931d1f..a13bd4dfaeb1 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -276,6 +276,16 @@ static struct ctl_table kern_table[] = {
276 }, 276 },
277 { 277 {
278 .ctl_name = CTL_UNNUMBERED, 278 .ctl_name = CTL_UNNUMBERED,
279 .procname = "sched_shares_thresh",
280 .data = &sysctl_sched_shares_thresh,
281 .maxlen = sizeof(unsigned int),
282 .mode = 0644,
283 .proc_handler = &proc_dointvec_minmax,
284 .strategy = &sysctl_intvec,
285 .extra1 = &zero,
286 },
287 {
288 .ctl_name = CTL_UNNUMBERED,
279 .procname = "sched_child_runs_first", 289 .procname = "sched_child_runs_first",
280 .data = &sysctl_sched_child_runs_first, 290 .data = &sysctl_sched_child_runs_first,
281 .maxlen = sizeof(unsigned int), 291 .maxlen = sizeof(unsigned int),
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 0581c11fe6c6..727c1ae0517a 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -567,11 +567,21 @@ static void tick_nohz_switch_to_nohz(void)
567static void tick_nohz_kick_tick(int cpu) 567static void tick_nohz_kick_tick(int cpu)
568{ 568{
569 struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); 569 struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
570 ktime_t delta, now;
570 571
571 if (!ts->tick_stopped) 572 if (!ts->tick_stopped)
572 return; 573 return;
573 574
574 tick_nohz_restart(ts, ktime_get()); 575 /*
576 * Do not touch the tick device, when the next expiry is either
577 * already reached or less/equal than the tick period.
578 */
579 now = ktime_get();
580 delta = ktime_sub(ts->sched_timer.expires, now);
581 if (delta.tv64 <= tick_period.tv64)
582 return;
583
584 tick_nohz_restart(ts, now);
575} 585}
576 586
577#else 587#else
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 714afad46539..f928f2a87b9b 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -62,6 +62,7 @@ struct workqueue_struct {
62 const char *name; 62 const char *name;
63 int singlethread; 63 int singlethread;
64 int freezeable; /* Freeze threads during suspend */ 64 int freezeable; /* Freeze threads during suspend */
65 int rt;
65#ifdef CONFIG_LOCKDEP 66#ifdef CONFIG_LOCKDEP
66 struct lockdep_map lockdep_map; 67 struct lockdep_map lockdep_map;
67#endif 68#endif
@@ -766,6 +767,7 @@ init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
766 767
767static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu) 768static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
768{ 769{
770 struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
769 struct workqueue_struct *wq = cwq->wq; 771 struct workqueue_struct *wq = cwq->wq;
770 const char *fmt = is_single_threaded(wq) ? "%s" : "%s/%d"; 772 const char *fmt = is_single_threaded(wq) ? "%s" : "%s/%d";
771 struct task_struct *p; 773 struct task_struct *p;
@@ -781,7 +783,8 @@ static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
781 */ 783 */
782 if (IS_ERR(p)) 784 if (IS_ERR(p))
783 return PTR_ERR(p); 785 return PTR_ERR(p);
784 786 if (cwq->wq->rt)
787 sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
785 cwq->thread = p; 788 cwq->thread = p;
786 789
787 return 0; 790 return 0;
@@ -801,6 +804,7 @@ static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
801struct workqueue_struct *__create_workqueue_key(const char *name, 804struct workqueue_struct *__create_workqueue_key(const char *name,
802 int singlethread, 805 int singlethread,
803 int freezeable, 806 int freezeable,
807 int rt,
804 struct lock_class_key *key, 808 struct lock_class_key *key,
805 const char *lock_name) 809 const char *lock_name)
806{ 810{
@@ -822,6 +826,7 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
822 lockdep_init_map(&wq->lockdep_map, lock_name, key, 0); 826 lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
823 wq->singlethread = singlethread; 827 wq->singlethread = singlethread;
824 wq->freezeable = freezeable; 828 wq->freezeable = freezeable;
829 wq->rt = rt;
825 INIT_LIST_HEAD(&wq->list); 830 INIT_LIST_HEAD(&wq->list);
826 831
827 if (singlethread) { 832 if (singlethread) {
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 482df94ea21e..1338469ac849 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -996,3 +996,25 @@ int bitmap_allocate_region(unsigned long *bitmap, int pos, int order)
996 return 0; 996 return 0;
997} 997}
998EXPORT_SYMBOL(bitmap_allocate_region); 998EXPORT_SYMBOL(bitmap_allocate_region);
999
1000/**
1001 * bitmap_copy_le - copy a bitmap, putting the bits into little-endian order.
1002 * @dst: destination buffer
1003 * @src: bitmap to copy
1004 * @nbits: number of bits in the bitmap
1005 *
1006 * Require nbits % BITS_PER_LONG == 0.
1007 */
1008void bitmap_copy_le(void *dst, const unsigned long *src, int nbits)
1009{
1010 unsigned long *d = dst;
1011 int i;
1012
1013 for (i = 0; i < nbits/BITS_PER_LONG; i++) {
1014 if (BITS_PER_LONG == 64)
1015 d[i] = cpu_to_le64(src[i]);
1016 else
1017 d[i] = cpu_to_le32(src[i]);
1018 }
1019}
1020EXPORT_SYMBOL(bitmap_copy_le);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index d4a92b63e98e..866dcc7eeb0c 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1088,7 +1088,6 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
1088 int node; 1088 int node;
1089 1089
1090 if (unlikely((cont->parent) == NULL)) { 1090 if (unlikely((cont->parent) == NULL)) {
1091 page_cgroup_init();
1092 mem = &init_mem_cgroup; 1091 mem = &init_mem_cgroup;
1093 } else { 1092 } else {
1094 mem = mem_cgroup_alloc(); 1093 mem = mem_cgroup_alloc();
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index 5d86550701f2..f59d797dc5a9 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -4,7 +4,10 @@
4#include <linux/bit_spinlock.h> 4#include <linux/bit_spinlock.h>
5#include <linux/page_cgroup.h> 5#include <linux/page_cgroup.h>
6#include <linux/hash.h> 6#include <linux/hash.h>
7#include <linux/slab.h>
7#include <linux/memory.h> 8#include <linux/memory.h>
9#include <linux/vmalloc.h>
10#include <linux/cgroup.h>
8 11
9static void __meminit 12static void __meminit
10__init_page_cgroup(struct page_cgroup *pc, unsigned long pfn) 13__init_page_cgroup(struct page_cgroup *pc, unsigned long pfn)
@@ -66,6 +69,9 @@ void __init page_cgroup_init(void)
66 69
67 int nid, fail; 70 int nid, fail;
68 71
72 if (mem_cgroup_subsys.disabled)
73 return;
74
69 for_each_online_node(nid) { 75 for_each_online_node(nid) {
70 fail = alloc_node_page_cgroup(nid); 76 fail = alloc_node_page_cgroup(nid);
71 if (fail) 77 if (fail)
@@ -106,9 +112,14 @@ int __meminit init_section_page_cgroup(unsigned long pfn)
106 nid = page_to_nid(pfn_to_page(pfn)); 112 nid = page_to_nid(pfn_to_page(pfn));
107 113
108 table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION; 114 table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
109 base = kmalloc_node(table_size, GFP_KERNEL, nid); 115 if (slab_is_available()) {
110 if (!base) 116 base = kmalloc_node(table_size, GFP_KERNEL, nid);
111 base = vmalloc_node(table_size, nid); 117 if (!base)
118 base = vmalloc_node(table_size, nid);
119 } else {
120 base = __alloc_bootmem_node_nopanic(NODE_DATA(nid), table_size,
121 PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
122 }
112 123
113 if (!base) { 124 if (!base) {
114 printk(KERN_ERR "page cgroup allocation failure\n"); 125 printk(KERN_ERR "page cgroup allocation failure\n");
@@ -135,11 +146,16 @@ void __free_page_cgroup(unsigned long pfn)
135 if (!ms || !ms->page_cgroup) 146 if (!ms || !ms->page_cgroup)
136 return; 147 return;
137 base = ms->page_cgroup + pfn; 148 base = ms->page_cgroup + pfn;
138 ms->page_cgroup = NULL; 149 if (is_vmalloc_addr(base)) {
139 if (is_vmalloc_addr(base))
140 vfree(base); 150 vfree(base);
141 else 151 ms->page_cgroup = NULL;
142 kfree(base); 152 } else {
153 struct page *page = virt_to_page(base);
154 if (!PageReserved(page)) { /* Is bootmem ? */
155 kfree(base);
156 ms->page_cgroup = NULL;
157 }
158 }
143} 159}
144 160
145int online_page_cgroup(unsigned long start_pfn, 161int online_page_cgroup(unsigned long start_pfn,
@@ -213,6 +229,9 @@ void __init page_cgroup_init(void)
213 unsigned long pfn; 229 unsigned long pfn;
214 int fail = 0; 230 int fail = 0;
215 231
232 if (mem_cgroup_subsys.disabled)
233 return;
234
216 for (pfn = 0; !fail && pfn < max_pfn; pfn += PAGES_PER_SECTION) { 235 for (pfn = 0; !fail && pfn < max_pfn; pfn += PAGES_PER_SECTION) {
217 if (!pfn_present(pfn)) 236 if (!pfn_present(pfn))
218 continue; 237 continue;
diff --git a/net/9p/Kconfig b/net/9p/Kconfig
index ff34c5acc130..c42c0c400bf9 100644
--- a/net/9p/Kconfig
+++ b/net/9p/Kconfig
@@ -20,6 +20,12 @@ config NET_9P_VIRTIO
20 This builds support for a transports between 20 This builds support for a transports between
21 guest partitions and a host partition. 21 guest partitions and a host partition.
22 22
23config NET_9P_RDMA
24 depends on NET_9P && INFINIBAND && EXPERIMENTAL
25 tristate "9P RDMA Transport (Experimental)"
26 help
27 This builds support for a RDMA transport.
28
23config NET_9P_DEBUG 29config NET_9P_DEBUG
24 bool "Debug information" 30 bool "Debug information"
25 depends on NET_9P 31 depends on NET_9P
diff --git a/net/9p/Makefile b/net/9p/Makefile
index 1041b7bd12e2..198a640d53a6 100644
--- a/net/9p/Makefile
+++ b/net/9p/Makefile
@@ -1,5 +1,6 @@
1obj-$(CONFIG_NET_9P) := 9pnet.o 1obj-$(CONFIG_NET_9P) := 9pnet.o
2obj-$(CONFIG_NET_9P_VIRTIO) += 9pnet_virtio.o 2obj-$(CONFIG_NET_9P_VIRTIO) += 9pnet_virtio.o
3obj-$(CONFIG_NET_9P_RDMA) += 9pnet_rdma.o
3 4
49pnet-objs := \ 59pnet-objs := \
5 mod.o \ 6 mod.o \
@@ -11,3 +12,6 @@ obj-$(CONFIG_NET_9P_VIRTIO) += 9pnet_virtio.o
11 12
129pnet_virtio-objs := \ 139pnet_virtio-objs := \
13 trans_virtio.o \ 14 trans_virtio.o \
15
169pnet_rdma-objs := \
17 trans_rdma.o \
diff --git a/net/9p/client.c b/net/9p/client.c
index bbac2f72b4d2..67717f69412e 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -159,6 +159,7 @@ static struct p9_req_t *p9_tag_alloc(struct p9_client *c, u16 tag)
159 159
160 if (!c->reqs[row]) { 160 if (!c->reqs[row]) {
161 printk(KERN_ERR "Couldn't grow tag array\n"); 161 printk(KERN_ERR "Couldn't grow tag array\n");
162 spin_unlock_irqrestore(&c->lock, flags);
162 return ERR_PTR(-ENOMEM); 163 return ERR_PTR(-ENOMEM);
163 } 164 }
164 for (col = 0; col < P9_ROW_MAXTAG; col++) { 165 for (col = 0; col < P9_ROW_MAXTAG; col++) {
diff --git a/net/9p/protocol.c b/net/9p/protocol.c
index 29be52439086..dcd7666824ba 100644
--- a/net/9p/protocol.c
+++ b/net/9p/protocol.c
@@ -53,6 +53,7 @@
53static int 53static int
54p9pdu_writef(struct p9_fcall *pdu, int optional, const char *fmt, ...); 54p9pdu_writef(struct p9_fcall *pdu, int optional, const char *fmt, ...);
55 55
56#ifdef CONFIG_NET_9P_DEBUG
56void 57void
57p9pdu_dump(int way, struct p9_fcall *pdu) 58p9pdu_dump(int way, struct p9_fcall *pdu)
58{ 59{
@@ -81,6 +82,12 @@ p9pdu_dump(int way, struct p9_fcall *pdu)
81 else 82 else
82 P9_DPRINTK(P9_DEBUG_PKT, "]]](%d) %s\n", datalen, buf); 83 P9_DPRINTK(P9_DEBUG_PKT, "]]](%d) %s\n", datalen, buf);
83} 84}
85#else
86void
87p9pdu_dump(int way, struct p9_fcall *pdu)
88{
89}
90#endif
84EXPORT_SYMBOL(p9pdu_dump); 91EXPORT_SYMBOL(p9pdu_dump);
85 92
86void p9stat_free(struct p9_wstat *stbuf) 93void p9stat_free(struct p9_wstat *stbuf)
@@ -179,7 +186,7 @@ p9pdu_vreadf(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap)
179 } 186 }
180 break; 187 break;
181 case 's':{ 188 case 's':{
182 char **ptr = va_arg(ap, char **); 189 char **sptr = va_arg(ap, char **);
183 int16_t len; 190 int16_t len;
184 int size; 191 int size;
185 192
@@ -189,17 +196,17 @@ p9pdu_vreadf(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap)
189 196
190 size = MAX(len, 0); 197 size = MAX(len, 0);
191 198
192 *ptr = kmalloc(size + 1, GFP_KERNEL); 199 *sptr = kmalloc(size + 1, GFP_KERNEL);
193 if (*ptr == NULL) { 200 if (*sptr == NULL) {
194 errcode = -EFAULT; 201 errcode = -EFAULT;
195 break; 202 break;
196 } 203 }
197 if (pdu_read(pdu, *ptr, size)) { 204 if (pdu_read(pdu, *sptr, size)) {
198 errcode = -EFAULT; 205 errcode = -EFAULT;
199 kfree(*ptr); 206 kfree(*sptr);
200 *ptr = NULL; 207 *sptr = NULL;
201 } else 208 } else
202 (*ptr)[size] = 0; 209 (*sptr)[size] = 0;
203 } 210 }
204 break; 211 break;
205 case 'Q':{ 212 case 'Q':{
@@ -373,13 +380,13 @@ p9pdu_vwritef(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap)
373 } 380 }
374 break; 381 break;
375 case 's':{ 382 case 's':{
376 const char *ptr = va_arg(ap, const char *); 383 const char *sptr = va_arg(ap, const char *);
377 int16_t len = 0; 384 int16_t len = 0;
378 if (ptr) 385 if (sptr)
379 len = MIN(strlen(ptr), USHORT_MAX); 386 len = MIN(strlen(sptr), USHORT_MAX);
380 387
381 errcode = p9pdu_writef(pdu, optional, "w", len); 388 errcode = p9pdu_writef(pdu, optional, "w", len);
382 if (!errcode && pdu_write(pdu, ptr, len)) 389 if (!errcode && pdu_write(pdu, sptr, len))
383 errcode = -EFAULT; 390 errcode = -EFAULT;
384 } 391 }
385 break; 392 break;
@@ -419,7 +426,7 @@ p9pdu_vwritef(struct p9_fcall *pdu, int optional, const char *fmt, va_list ap)
419 case 'U':{ 426 case 'U':{
420 int32_t count = va_arg(ap, int32_t); 427 int32_t count = va_arg(ap, int32_t);
421 const char __user *udata = 428 const char __user *udata =
422 va_arg(ap, const void *); 429 va_arg(ap, const void __user *);
423 errcode = 430 errcode =
424 p9pdu_writef(pdu, optional, "d", count); 431 p9pdu_writef(pdu, optional, "d", count);
425 if (!errcode && pdu_write_u(pdu, udata, count)) 432 if (!errcode && pdu_write_u(pdu, udata, count))
@@ -542,8 +549,10 @@ int p9pdu_finalize(struct p9_fcall *pdu)
542 err = p9pdu_writef(pdu, 0, "d", size); 549 err = p9pdu_writef(pdu, 0, "d", size);
543 pdu->size = size; 550 pdu->size = size;
544 551
552#ifdef CONFIG_NET_9P_DEBUG
545 if ((p9_debug_level & P9_DEBUG_PKT) == P9_DEBUG_PKT) 553 if ((p9_debug_level & P9_DEBUG_PKT) == P9_DEBUG_PKT)
546 p9pdu_dump(0, pdu); 554 p9pdu_dump(0, pdu);
555#endif
547 556
548 P9_DPRINTK(P9_DEBUG_9P, ">>> size=%d type: %d tag: %d\n", pdu->size, 557 P9_DPRINTK(P9_DEBUG_9P, ">>> size=%d type: %d tag: %d\n", pdu->size,
549 pdu->id, pdu->tag); 558 pdu->id, pdu->tag);
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index be65d8242fd2..1df0356f242b 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -678,11 +678,9 @@ static int p9_fd_request(struct p9_client *client, struct p9_req_t *req)
678 678
679static int p9_fd_cancel(struct p9_client *client, struct p9_req_t *req) 679static int p9_fd_cancel(struct p9_client *client, struct p9_req_t *req)
680{ 680{
681 struct p9_trans_fd *ts = client->trans;
682 struct p9_conn *m = ts->conn;
683 int ret = 1; 681 int ret = 1;
684 682
685 P9_DPRINTK(P9_DEBUG_TRANS, "mux %p req %p\n", m, req); 683 P9_DPRINTK(P9_DEBUG_TRANS, "client %p req %p\n", client, req);
686 684
687 spin_lock(&client->lock); 685 spin_lock(&client->lock);
688 list_del(&req->req_list); 686 list_del(&req->req_list);
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
new file mode 100644
index 000000000000..8d6cc4777aae
--- /dev/null
+++ b/net/9p/trans_rdma.c
@@ -0,0 +1,712 @@
1/*
2 * linux/fs/9p/trans_rdma.c
3 *
4 * RDMA transport layer based on the trans_fd.c implementation.
5 *
6 * Copyright (C) 2008 by Tom Tucker <tom@opengridcomputing.com>
7 * Copyright (C) 2006 by Russ Cox <rsc@swtch.com>
8 * Copyright (C) 2004-2005 by Latchesar Ionkov <lucho@ionkov.net>
9 * Copyright (C) 2004-2008 by Eric Van Hensbergen <ericvh@gmail.com>
10 * Copyright (C) 1997-2002 by Ron Minnich <rminnich@sarnoff.com>
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2
14 * as published by the Free Software Foundation.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to:
23 * Free Software Foundation
24 * 51 Franklin Street, Fifth Floor
25 * Boston, MA 02111-1301 USA
26 *
27 */
28
29#include <linux/in.h>
30#include <linux/module.h>
31#include <linux/net.h>
32#include <linux/ipv6.h>
33#include <linux/kthread.h>
34#include <linux/errno.h>
35#include <linux/kernel.h>
36#include <linux/un.h>
37#include <linux/uaccess.h>
38#include <linux/inet.h>
39#include <linux/idr.h>
40#include <linux/file.h>
41#include <linux/parser.h>
42#include <linux/semaphore.h>
43#include <net/9p/9p.h>
44#include <net/9p/client.h>
45#include <net/9p/transport.h>
46#include <rdma/ib_verbs.h>
47#include <rdma/rdma_cm.h>
48#include <rdma/ib_verbs.h>
49
#define P9_PORT 5640
#define P9_RDMA_SQ_DEPTH 32
#define P9_RDMA_RQ_DEPTH 32
#define P9_RDMA_SEND_SGE 4
#define P9_RDMA_RECV_SGE 4
#define P9_RDMA_IRD 0
#define P9_RDMA_ORD 0
#define P9_RDMA_TIMEOUT 30000 /* 30 seconds */
#define P9_RDMA_MAXSIZE (4*4096) /* Min SGE is 4, so we can
				  * safely advertise a maxsize
				  * of 64k */
				 /* NOTE(review): (4*4096) is 16KB, not the
				  * 64k the comment claims — confirm which
				  * value was intended. */

#define P9_RDMA_MAX_SGE (P9_RDMA_MAXSIZE >> PAGE_SHIFT)
/**
 * struct p9_trans_rdma - RDMA transport instance
 *
 * @state: tracks the transport state machine for connection setup and tear down
 * @cm_id: The RDMA CM ID
 * @pd: Protection Domain pointer
 * @qp: Queue Pair pointer
 * @cq: Completion Queue pointer
 * @dma_mr: DMA memory region registered when the device lacks a local
 *          DMA lkey (see rdma_create_trans()); NULL otherwise
 * @lkey: The local access only memory region key
 * @timeout: Number of msecs to wait for connection management events
 * @sq_depth: The depth of the Send Queue
 * @sq_sem: Semaphore for the SQ; throttles senders in rdma_request()
 * @rq_depth: The depth of the Receive Queue.
 * @rq_count: Count of posted receive buffers; bounded by @rq_depth in
 *            rdma_request()
 * @addr: The remote peer's address
 * @req_lock: Protects the transition to the CLOSING state
 * @cm_done: Completion event for connection management tracking
 */
struct p9_trans_rdma {
	/* States progress strictly in declaration order during setup;
	 * p9_cm_event_handler() BUG()s on out-of-order CM events. */
	enum {
		P9_RDMA_INIT,
		P9_RDMA_ADDR_RESOLVED,
		P9_RDMA_ROUTE_RESOLVED,
		P9_RDMA_CONNECTED,
		P9_RDMA_FLUSHING,
		P9_RDMA_CLOSING,
		P9_RDMA_CLOSED,
	} state;
	struct rdma_cm_id *cm_id;
	struct ib_pd *pd;
	struct ib_qp *qp;
	struct ib_cq *cq;
	struct ib_mr *dma_mr;
	u32 lkey;
	long timeout;
	int sq_depth;
	struct semaphore sq_sem;
	int rq_depth;
	atomic_t rq_count;
	struct sockaddr_in addr;
	spinlock_t req_lock;

	struct completion cm_done;
};
107
/**
 * p9_rdma_context - Keeps track of in-process WR
 *
 * @wc_op: The original WR op for when the CQE completes in error.
 * @busa: Bus address to unmap when the WR completes
 * @req: Keeps track of requests (send)
 * @rc: Keeps track of replies (receive)
 */
struct p9_rdma_context;
struct p9_rdma_context {
	enum ib_wc_opcode wc_op;
	dma_addr_t busa;
	/* A context belongs to exactly one direction, so the payload
	 * pointers can share storage. */
	union {
		struct p9_req_t *req;
		struct p9_fcall *rc;
	};
};
125
/**
 * p9_rdma_opts - Collection of mount options
 * @port: port of connection
 * @sq_depth: The requested depth of the SQ. This really doesn't need
 * to be any deeper than the number of threads used in the client
 * @rq_depth: The depth of the RQ. Should be greater than or equal to SQ depth
 * @timeout: Time to wait in msecs for CM events
 */
struct p9_rdma_opts {
	short port;
	int sq_depth;
	int rq_depth;
	long timeout;
};
140
/*
 * Option Parsing (code inspired by NFS code)
 */
enum {
	/* Options that take integer arguments */
	Opt_port, Opt_rq_depth, Opt_sq_depth, Opt_timeout, Opt_err,
};

/* Mount-option keyword table consumed by match_token() in parse_opts() */
static match_table_t tokens = {
	{Opt_port, "port=%u"},
	{Opt_sq_depth, "sq=%u"},
	{Opt_rq_depth, "rq=%u"},
	{Opt_timeout, "timeout=%u"},
	{Opt_err, NULL},
};
156
157/**
158 * parse_options - parse mount options into session structure
159 * @options: options string passed from mount
160 * @opts: transport-specific structure to parse options into
161 *
162 * Returns 0 upon success, -ERRNO upon failure
163 */
164static int parse_opts(char *params, struct p9_rdma_opts *opts)
165{
166 char *p;
167 substring_t args[MAX_OPT_ARGS];
168 int option;
169 char *options;
170 int ret;
171
172 opts->port = P9_PORT;
173 opts->sq_depth = P9_RDMA_SQ_DEPTH;
174 opts->rq_depth = P9_RDMA_RQ_DEPTH;
175 opts->timeout = P9_RDMA_TIMEOUT;
176
177 if (!params)
178 return 0;
179
180 options = kstrdup(params, GFP_KERNEL);
181 if (!options) {
182 P9_DPRINTK(P9_DEBUG_ERROR,
183 "failed to allocate copy of option string\n");
184 return -ENOMEM;
185 }
186
187 while ((p = strsep(&options, ",")) != NULL) {
188 int token;
189 int r;
190 if (!*p)
191 continue;
192 token = match_token(p, tokens, args);
193 r = match_int(&args[0], &option);
194 if (r < 0) {
195 P9_DPRINTK(P9_DEBUG_ERROR,
196 "integer field, but no integer?\n");
197 ret = r;
198 continue;
199 }
200 switch (token) {
201 case Opt_port:
202 opts->port = option;
203 break;
204 case Opt_sq_depth:
205 opts->sq_depth = option;
206 break;
207 case Opt_rq_depth:
208 opts->rq_depth = option;
209 break;
210 case Opt_timeout:
211 opts->timeout = option;
212 break;
213 default:
214 continue;
215 }
216 }
217 /* RQ must be at least as large as the SQ */
218 opts->rq_depth = max(opts->rq_depth, opts->sq_depth);
219 kfree(options);
220 return 0;
221}
222
/*
 * p9_cm_event_handler - RDMA connection-manager event callback
 * @id: CM identifier; its context is the owning p9_client
 * @event: CM event being delivered
 *
 * Drives the transport state machine through address resolution, route
 * resolution and connect/disconnect.  rdma_create_trans() sleeps on
 * rdma->cm_done between steps and re-checks rdma->state afterwards.
 * Always returns 0 (event consumed).
 */
static int
p9_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	struct p9_client *c = id->context;	/* set by rdma_create_id() */
	struct p9_trans_rdma *rdma = c->trans;
	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
		/* Setup events must arrive in strict order; anything else
		 * indicates a bug, hence BUG_ON rather than recovery. */
		BUG_ON(rdma->state != P9_RDMA_INIT);
		rdma->state = P9_RDMA_ADDR_RESOLVED;
		break;

	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		BUG_ON(rdma->state != P9_RDMA_ADDR_RESOLVED);
		rdma->state = P9_RDMA_ROUTE_RESOLVED;
		break;

	case RDMA_CM_EVENT_ESTABLISHED:
		BUG_ON(rdma->state != P9_RDMA_ROUTE_RESOLVED);
		rdma->state = P9_RDMA_CONNECTED;
		break;

	case RDMA_CM_EVENT_DISCONNECTED:
		if (rdma)
			rdma->state = P9_RDMA_CLOSED;
		if (c)
			c->status = Disconnected;
		break;

	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
		break;

	/* Everything below is treated as fatal to this connection. */
	case RDMA_CM_EVENT_ADDR_CHANGE:
	case RDMA_CM_EVENT_ROUTE_ERROR:
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
	case RDMA_CM_EVENT_MULTICAST_JOIN:
	case RDMA_CM_EVENT_MULTICAST_ERROR:
	case RDMA_CM_EVENT_REJECTED:
	case RDMA_CM_EVENT_CONNECT_REQUEST:
	case RDMA_CM_EVENT_CONNECT_RESPONSE:
	case RDMA_CM_EVENT_CONNECT_ERROR:
	case RDMA_CM_EVENT_ADDR_ERROR:
	case RDMA_CM_EVENT_UNREACHABLE:
		c->status = Disconnected;
		rdma_disconnect(rdma->cm_id);
		break;
	default:
		BUG();
	}
	/* Wake whoever is waiting for this CM step to finish. */
	complete(&rdma->cm_done);
	return 0;
}
274
/*
 * handle_recv - process one completed receive work request
 * @client: client instance
 * @rdma: RDMA transport
 * @c: per-WR context holding the reply fcall and its DMA mapping
 * @status: completion status reported in the CQE
 * @byte_len: number of bytes received (currently unused)
 *
 * Unmaps the reply buffer, matches the reply to its outstanding request
 * by tag and hands it to the client.  Any failure marks the whole
 * connection dead: without a parsable tag the reply cannot be matched
 * to a request, so no finer-grained recovery is possible.
 */
static void
handle_recv(struct p9_client *client, struct p9_trans_rdma *rdma,
	    struct p9_rdma_context *c, enum ib_wc_status status, u32 byte_len)
{
	struct p9_req_t *req;
	int err = 0;
	int16_t tag;

	req = NULL;
	/* Return buffer ownership to the CPU before looking at it */
	ib_dma_unmap_single(rdma->cm_id->device, c->busa, client->msize,
							 DMA_FROM_DEVICE);

	if (status != IB_WC_SUCCESS)
		goto err_out;

	/* Parse only far enough to learn the reply's tag */
	err = p9_parse_header(c->rc, NULL, NULL, &tag, 1);
	if (err)
		goto err_out;

	req = p9_tag_lookup(client, tag);
	if (!req)
		goto err_out;

	/* Hand the reply fcall to the request and notify the client */
	req->rc = c->rc;
	p9_client_cb(client, req);

	return;

 err_out:
	P9_DPRINTK(P9_DEBUG_ERROR, "req %p err %d status %d\n",
		   req, err, status);
	rdma->state = P9_RDMA_FLUSHING;
	client->status = Disconnected;
	return;
}
310
311static void
312handle_send(struct p9_client *client, struct p9_trans_rdma *rdma,
313 struct p9_rdma_context *c, enum ib_wc_status status, u32 byte_len)
314{
315 ib_dma_unmap_single(rdma->cm_id->device,
316 c->busa, c->req->tc->size,
317 DMA_TO_DEVICE);
318}
319
320static void qp_event_handler(struct ib_event *event, void *context)
321{
322 P9_DPRINTK(P9_DEBUG_ERROR, "QP event %d context %p\n", event->event,
323 context);
324}
325
/*
 * cq_comp_handler - completion queue notification callback
 * @cq: the CQ that signalled
 * @cq_context: the owning p9_client (set at ib_create_cq() time)
 *
 * Re-arms CQ notification first, then drains every available
 * completion.  Dispatch is keyed on the op recorded in the per-WR
 * context (c->wc_op) rather than wc.opcode, since the context stores
 * the original op for CQEs that complete in error.  The context is
 * freed once handled.
 */
static void cq_comp_handler(struct ib_cq *cq, void *cq_context)
{
	struct p9_client *client = cq_context;
	struct p9_trans_rdma *rdma = client->trans;
	int ret;
	struct ib_wc wc;

	/* Re-arm before polling so no completion slips through unnotified */
	ib_req_notify_cq(rdma->cq, IB_CQ_NEXT_COMP);
	while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
		/* wr_id carries the context pointer posted with the WR */
		struct p9_rdma_context *c = (void *) (unsigned long) wc.wr_id;

		switch (c->wc_op) {
		case IB_WC_RECV:
			/* One fewer receive buffer outstanding on the RQ */
			atomic_dec(&rdma->rq_count);
			handle_recv(client, rdma, c, wc.status, wc.byte_len);
			break;

		case IB_WC_SEND:
			handle_send(client, rdma, c, wc.status, wc.byte_len);
			/* A send slot opened up; release one waiting sender */
			up(&rdma->sq_sem);
			break;

		default:
			printk(KERN_ERR "9prdma: unexpected completion type, "
			       "c->wc_op=%d, wc.opcode=%d, status=%d\n",
			       c->wc_op, wc.opcode, wc.status);
			break;
		}
		kfree(c);
	}
}
357
358static void cq_event_handler(struct ib_event *e, void *v)
359{
360 P9_DPRINTK(P9_DEBUG_ERROR, "CQ event %d context %p\n", e->event, v);
361}
362
363static void rdma_destroy_trans(struct p9_trans_rdma *rdma)
364{
365 if (!rdma)
366 return;
367
368 if (rdma->dma_mr && !IS_ERR(rdma->dma_mr))
369 ib_dereg_mr(rdma->dma_mr);
370
371 if (rdma->qp && !IS_ERR(rdma->qp))
372 ib_destroy_qp(rdma->qp);
373
374 if (rdma->pd && !IS_ERR(rdma->pd))
375 ib_dealloc_pd(rdma->pd);
376
377 if (rdma->cq && !IS_ERR(rdma->cq))
378 ib_destroy_cq(rdma->cq);
379
380 if (rdma->cm_id && !IS_ERR(rdma->cm_id))
381 rdma_destroy_id(rdma->cm_id);
382
383 kfree(rdma);
384}
385
/*
 * post_recv - post one receive buffer on the QP
 * @client: client instance (supplies the buffer size, msize)
 * @c: per-WR context whose c->rc->sdata is the reply buffer to map
 *
 * Maps the reply buffer for DMA and posts it as a single-SGE receive
 * work request.  The context pointer rides in wr.wr_id so
 * cq_comp_handler() can recover it on completion.
 *
 * Returns 0 on success, -EIO if the DMA mapping fails, or the error
 * from ib_post_recv().
 */
static int
post_recv(struct p9_client *client, struct p9_rdma_context *c)
{
	struct p9_trans_rdma *rdma = client->trans;
	struct ib_recv_wr wr, *bad_wr;
	struct ib_sge sge;

	c->busa = ib_dma_map_single(rdma->cm_id->device,
				    c->rc->sdata, client->msize,
				    DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(rdma->cm_id->device, c->busa))
		goto error;

	sge.addr = c->busa;
	sge.length = client->msize;
	sge.lkey = rdma->lkey;

	wr.next = NULL;
	/* Record the op so error completions can still be dispatched */
	c->wc_op = IB_WC_RECV;
	wr.wr_id = (unsigned long) c;
	wr.sg_list = &sge;
	wr.num_sge = 1;
	return ib_post_recv(rdma->qp, &wr, &bad_wr);

 error:
	P9_DPRINTK(P9_DEBUG_ERROR, "EIO\n");
	return -EIO;
}
414
415static int rdma_request(struct p9_client *client, struct p9_req_t *req)
416{
417 struct p9_trans_rdma *rdma = client->trans;
418 struct ib_send_wr wr, *bad_wr;
419 struct ib_sge sge;
420 int err = 0;
421 unsigned long flags;
422 struct p9_rdma_context *c = NULL;
423 struct p9_rdma_context *rpl_context = NULL;
424
425 /* Allocate an fcall for the reply */
426 rpl_context = kmalloc(sizeof *rpl_context, GFP_KERNEL);
427 if (!rpl_context)
428 goto err_close;
429
430 /*
431 * If the request has a buffer, steal it, otherwise
432 * allocate a new one. Typically, requests should already
433 * have receive buffers allocated and just swap them around
434 */
435 if (!req->rc) {
436 req->rc = kmalloc(sizeof(struct p9_fcall)+client->msize,
437 GFP_KERNEL);
438 if (req->rc) {
439 req->rc->sdata = (char *) req->rc +
440 sizeof(struct p9_fcall);
441 req->rc->capacity = client->msize;
442 }
443 }
444 rpl_context->rc = req->rc;
445 if (!rpl_context->rc) {
446 kfree(rpl_context);
447 goto err_close;
448 }
449
450 /*
451 * Post a receive buffer for this request. We need to ensure
452 * there is a reply buffer available for every outstanding
453 * request. A flushed request can result in no reply for an
454 * outstanding request, so we must keep a count to avoid
455 * overflowing the RQ.
456 */
457 if (atomic_inc_return(&rdma->rq_count) <= rdma->rq_depth) {
458 err = post_recv(client, rpl_context);
459 if (err) {
460 kfree(rpl_context->rc);
461 kfree(rpl_context);
462 goto err_close;
463 }
464 } else
465 atomic_dec(&rdma->rq_count);
466
467 /* remove posted receive buffer from request structure */
468 req->rc = NULL;
469
470 /* Post the request */
471 c = kmalloc(sizeof *c, GFP_KERNEL);
472 if (!c)
473 goto err_close;
474 c->req = req;
475
476 c->busa = ib_dma_map_single(rdma->cm_id->device,
477 c->req->tc->sdata, c->req->tc->size,
478 DMA_TO_DEVICE);
479 if (ib_dma_mapping_error(rdma->cm_id->device, c->busa))
480 goto error;
481
482 sge.addr = c->busa;
483 sge.length = c->req->tc->size;
484 sge.lkey = rdma->lkey;
485
486 wr.next = NULL;
487 c->wc_op = IB_WC_SEND;
488 wr.wr_id = (unsigned long) c;
489 wr.opcode = IB_WR_SEND;
490 wr.send_flags = IB_SEND_SIGNALED;
491 wr.sg_list = &sge;
492 wr.num_sge = 1;
493
494 if (down_interruptible(&rdma->sq_sem))
495 goto error;
496
497 return ib_post_send(rdma->qp, &wr, &bad_wr);
498
499 error:
500 P9_DPRINTK(P9_DEBUG_ERROR, "EIO\n");
501 return -EIO;
502
503 err_close:
504 spin_lock_irqsave(&rdma->req_lock, flags);
505 if (rdma->state < P9_RDMA_CLOSING) {
506 rdma->state = P9_RDMA_CLOSING;
507 spin_unlock_irqrestore(&rdma->req_lock, flags);
508 rdma_disconnect(rdma->cm_id);
509 } else
510 spin_unlock_irqrestore(&rdma->req_lock, flags);
511 return err;
512}
513
514static void rdma_close(struct p9_client *client)
515{
516 struct p9_trans_rdma *rdma;
517
518 if (!client)
519 return;
520
521 rdma = client->trans;
522 if (!rdma)
523 return;
524
525 client->status = Disconnected;
526 rdma_disconnect(rdma->cm_id);
527 rdma_destroy_trans(rdma);
528}
529
530/**
531 * alloc_rdma - Allocate and initialize the rdma transport structure
532 * @msize: MTU
533 * @dotu: Extension attribute
534 * @opts: Mount options structure
535 */
536static struct p9_trans_rdma *alloc_rdma(struct p9_rdma_opts *opts)
537{
538 struct p9_trans_rdma *rdma;
539
540 rdma = kzalloc(sizeof(struct p9_trans_rdma), GFP_KERNEL);
541 if (!rdma)
542 return NULL;
543
544 rdma->sq_depth = opts->sq_depth;
545 rdma->rq_depth = opts->rq_depth;
546 rdma->timeout = opts->timeout;
547 spin_lock_init(&rdma->req_lock);
548 init_completion(&rdma->cm_done);
549 sema_init(&rdma->sq_sem, rdma->sq_depth);
550 atomic_set(&rdma->rq_count, 0);
551
552 return rdma;
553}
554
/*
 * rdma_cancel - transport method: attempt to cancel a request
 *
 * Once the send WR has been posted to the hardware there is no way to
 * pull it back, so always report "not cancelled" (1).
 */
static int rdma_cancel(struct p9_client *client, struct p9_req_t *req)
{
	return 1;
}
560
/**
 * rdma_create_trans - Transport method for creating a transport instance
 * @client: client instance
 * @addr: IP address string (dotted quad, converted with in_aton())
 * @args: Mount options string
 *
 * Runs the full RDMA-CM handshake synchronously: resolve address,
 * resolve route, create CQ/PD/QP, then connect.  After each CM call the
 * caller sleeps on rdma->cm_done, which p9_cm_event_handler() completes,
 * and re-checks rdma->state to confirm the step succeeded.
 *
 * Returns 0 on success, the parse_opts() error, -ENOMEM, or -ENOTCONN
 * for any failure during the handshake.
 */
static int
rdma_create_trans(struct p9_client *client, const char *addr, char *args)
{
	int err;
	struct p9_rdma_opts opts;
	struct p9_trans_rdma *rdma;
	struct rdma_conn_param conn_param;
	struct ib_qp_init_attr qp_attr;
	struct ib_device_attr devattr;

	/* Parse the transport specific mount options */
	err = parse_opts(args, &opts);
	if (err < 0)
		return err;

	/* Create and initialize the RDMA transport structure */
	rdma = alloc_rdma(&opts);
	if (!rdma)
		return -ENOMEM;

	/* Create the RDMA CM ID; client becomes the CM callback context */
	rdma->cm_id = rdma_create_id(p9_cm_event_handler, client, RDMA_PS_TCP);
	if (IS_ERR(rdma->cm_id))
		goto error;

	/* Resolve the server's address */
	rdma->addr.sin_family = AF_INET;
	rdma->addr.sin_addr.s_addr = in_aton(addr);
	rdma->addr.sin_port = htons(opts.port);
	err = rdma_resolve_addr(rdma->cm_id, NULL,
				(struct sockaddr *)&rdma->addr,
				rdma->timeout);
	if (err)
		goto error;
	err = wait_for_completion_interruptible(&rdma->cm_done);
	if (err || (rdma->state != P9_RDMA_ADDR_RESOLVED))
		goto error;

	/* Resolve the route to the server */
	err = rdma_resolve_route(rdma->cm_id, rdma->timeout);
	if (err)
		goto error;
	err = wait_for_completion_interruptible(&rdma->cm_done);
	if (err || (rdma->state != P9_RDMA_ROUTE_RESOLVED))
		goto error;

	/* Query the device attributes */
	err = ib_query_device(rdma->cm_id->device, &devattr);
	if (err)
		goto error;

	/* Create the Completion Queue, shared by the SQ and RQ */
	rdma->cq = ib_create_cq(rdma->cm_id->device, cq_comp_handler,
				cq_event_handler, client,
				opts.sq_depth + opts.rq_depth + 1, 0);
	if (IS_ERR(rdma->cq))
		goto error;
	ib_req_notify_cq(rdma->cq, IB_CQ_NEXT_COMP);

	/* Create the Protection Domain */
	rdma->pd = ib_alloc_pd(rdma->cm_id->device);
	if (IS_ERR(rdma->pd))
		goto error;

	/* Cache the DMA lkey in the transport */
	rdma->dma_mr = NULL;
	if (devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
		rdma->lkey = rdma->cm_id->device->local_dma_lkey;
	else {
		/* No device-wide DMA lkey: register a DMA MR instead;
		 * it is deregistered in rdma_destroy_trans() */
		rdma->dma_mr = ib_get_dma_mr(rdma->pd, IB_ACCESS_LOCAL_WRITE);
		if (IS_ERR(rdma->dma_mr))
			goto error;
		rdma->lkey = rdma->dma_mr->lkey;
	}

	/* Create the Queue Pair */
	memset(&qp_attr, 0, sizeof qp_attr);
	qp_attr.event_handler = qp_event_handler;
	qp_attr.qp_context = client;
	qp_attr.cap.max_send_wr = opts.sq_depth;
	qp_attr.cap.max_recv_wr = opts.rq_depth;
	qp_attr.cap.max_send_sge = P9_RDMA_SEND_SGE;
	qp_attr.cap.max_recv_sge = P9_RDMA_RECV_SGE;
	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_attr.qp_type = IB_QPT_RC;
	qp_attr.send_cq = rdma->cq;
	qp_attr.recv_cq = rdma->cq;
	err = rdma_create_qp(rdma->cm_id, rdma->pd, &qp_attr);
	if (err)
		goto error;
	rdma->qp = rdma->cm_id->qp;

	/* Request a connection */
	memset(&conn_param, 0, sizeof(conn_param));
	conn_param.private_data = NULL;
	conn_param.private_data_len = 0;
	conn_param.responder_resources = P9_RDMA_IRD;
	conn_param.initiator_depth = P9_RDMA_ORD;
	err = rdma_connect(rdma->cm_id, &conn_param);
	if (err)
		goto error;
	err = wait_for_completion_interruptible(&rdma->cm_done);
	if (err || (rdma->state != P9_RDMA_CONNECTED))
		goto error;

	client->trans = rdma;
	client->status = Connected;

	return 0;

error:
	/* rdma_destroy_trans() handles a partially-built transport */
	rdma_destroy_trans(rdma);
	return -ENOTCONN;
}
681
/*
 * Transport method table registered with the 9P core under the name
 * "rdma".  def = 0 means it is not the default transport.
 */
static struct p9_trans_module p9_rdma_trans = {
	.name = "rdma",
	.maxsize = P9_RDMA_MAXSIZE,
	.def = 0,
	.owner = THIS_MODULE,
	.create = rdma_create_trans,
	.close = rdma_close,
	.request = rdma_request,
	.cancel = rdma_cancel,
};
692
693/**
694 * p9_trans_rdma_init - Register the 9P RDMA transport driver
695 */
696static int __init p9_trans_rdma_init(void)
697{
698 v9fs_register_trans(&p9_rdma_trans);
699 return 0;
700}
701
702static void __exit p9_trans_rdma_exit(void)
703{
704 v9fs_unregister_trans(&p9_rdma_trans);
705}
706
module_init(p9_trans_rdma_init);
module_exit(p9_trans_rdma_exit);

/* Module metadata */
MODULE_AUTHOR("Tom Tucker <tom@opengridcomputing.com>");
MODULE_DESCRIPTION("RDMA Transport for 9P");
MODULE_LICENSE("Dual BSD/GPL");